diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2b0f803cb742..953f6a5d4dca 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,56 @@
# CHANGELOG
+## `v33.0.0`
+
+### New Packages
+
+| Package Name | API Version |
+| -----------: | :---------: |
+| batch | 2019-08-01<br/>2019-08-01.10.0 |
+| computervision | v2.1 |
+| containerregistry | 2019-07 |
+| containerservice | 2019-08-01 |
+| frontdoor | 2019-05-01 |
+| machinelearningservices | 2019-06-01 |
+| managednetwork | 2019-06-01-preview |
+| peering | 2019-08-01-preview |
+| policy | 2019-06-01 |
+| portal | 2018-10-01-preview |
+| servicefabric | 2019-03-01-preview |
+
+### Updated Packages
+
+| Package Name | API Version |
+| -----------: | :---------: |
+| backup | 2016-12-01<br/>2017-07-01 |
+| frontdoor | 2019-04-01 |
+| logic | 2016-06-01<br/>2018-07-01-preview |
+| network | 2018-07-01<br/>2018-08-01<br/>2018-10-01<br/>2018-11-01<br/>2018-12-01 |
+| resources | 2015-11-01<br/>2016-02-01<br/>2016-07-01<br/>2016-09-01<br/>2017-05-10<br/>2018-02-01<br/>2018-05-01<br/>2019-03-01<br/>2019-05-01 |
+| security | v2.0 |
+| sql | 2015-05-01-preview<br/>2017-03-01-preview<br/>2017-10-01-preview |
+| storage | 2019-04-01 |
+
+### Breaking Changes
+
+| Package Name | API Version |
+| -----------: | :---------: |
+| billing | 2018-11-01-preview |
+| compute | 2019-03-01<br/>2019-07-01 |
+| datafactory | 2018-06-01 |
+| datamigration | 2017-11-15-preview<br/>2018-03-31-preview<br/>2018-04-19<br/>2018-07-15-preview |
+| hanaonazure | 2017-11-03-preview |
+| healthcareapis | 2018-08-20-preview |
+| inkrecognizer | v1.0 |
+| insights | 2015-05-01 |
+| kusto | 2019-01-21 |
+| network | 2019-02-01<br/>2019-04-01<br/>2019-06-01 |
+| qnamaker | v4.0 |
+| reservations | 2019-04-01 |
+| security | v3.0 |
+| securityinsight | 2017-08-01-preview |
+| servicefabric | 2019-03-01 |
+
## `v32.6.0`
### New Packages
diff --git a/Gopkg.lock b/Gopkg.lock
index ba530e8934e5..35f84af43876 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -2,15 +2,7 @@
[[projects]]
- digest = "1:6b1426cad7057b717351eacf5b6fe70f053f11aac1ce254bbf2fd72c031719eb"
- name = "contrib.go.opencensus.io/exporter/ocagent"
- packages = ["."]
- pruneopts = "UT"
- revision = "dcb33c7f3b7cfe67e8a2cea10207ede1b7c40764"
- version = "v0.4.12"
-
-[[projects]]
- digest = "1:a4431dd9598c9926ff12356c100ed73807815112de703322564f76d60c5294a4"
+ digest = "1:c39603b93cda6859fa04dc23bffee64dc5db6f6f4fec93539ddef9d7e628a5da"
name = "github.com/Azure/go-autorest"
packages = [
"autorest",
@@ -25,8 +17,8 @@
"tracing",
]
pruneopts = "UT"
- revision = "2913f263500c4a5b23dada1b46ccd22ac972315f"
- version = "v12.3.0"
+ revision = "5e7a399d8bbf4953ab0c8e3167d7fd535fd74ce1"
+ version = "v13.0.0"
[[projects]]
digest = "1:55388fd080150b9a072912f97b1f5891eb0b50df43401f8b75fb4273d3fec9fc"
@@ -36,21 +28,6 @@
revision = "c7af12943936e8c39859482e61f0574c2fd7fc75"
version = "v1.4.2"
-[[projects]]
- digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20"
- name = "github.com/census-instrumentation/opencensus-proto"
- packages = [
- "gen-go/agent/common/v1",
- "gen-go/agent/metrics/v1",
- "gen-go/agent/trace/v1",
- "gen-go/metrics/v1",
- "gen-go/resource/v1",
- "gen-go/trace/v1",
- ]
- pruneopts = "UT"
- revision = "d89fa54de508111353cb0b06403c00569be780d8"
- version = "v0.2.1"
-
[[projects]]
digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55"
name = "github.com/dgrijalva/jwt-go"
@@ -92,47 +69,6 @@
pruneopts = "UT"
revision = "eeefdecb41b842af6dc652aaea4026e8403e62df"
-[[projects]]
- digest = "1:b532ee3f683c057e797694b5bfeb3827d89e6adf41c53dbc80e549bca76364ea"
- name = "github.com/golang/protobuf"
- packages = [
- "jsonpb",
- "proto",
- "protoc-gen-go/descriptor",
- "protoc-gen-go/generator",
- "protoc-gen-go/generator/internal/remap",
- "protoc-gen-go/plugin",
- "ptypes",
- "ptypes/any",
- "ptypes/duration",
- "ptypes/struct",
- "ptypes/timestamp",
- "ptypes/wrappers",
- ]
- pruneopts = "UT"
- revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
- version = "v1.3.2"
-
-[[projects]]
- digest = "1:3b341cd71012c63aacddabfc70b9110be8e30c553349552ad3f77242843f2d03"
- name = "github.com/grpc-ecosystem/grpc-gateway"
- packages = [
- "internal",
- "runtime",
- "utilities",
- ]
- pruneopts = "UT"
- revision = "ad529a448ba494a88058f9e5be0988713174ac86"
- version = "v1.9.5"
-
-[[projects]]
- digest = "1:7fae9ec96d10b2afce0da23c378c8b3389319b7f92fa092f2621bba3078cfb4b"
- name = "github.com/hashicorp/golang-lru"
- packages = ["simplelru"]
- pruneopts = "UT"
- revision = "7f827b33c0f158ec5dfbba01bb0b14a4541fd81d"
- version = "v0.5.3"
-
[[projects]]
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
name = "github.com/inconshreveable/mousetrap"
@@ -205,33 +141,6 @@
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
-[[projects]]
- digest = "1:4c93890bbbb5016505e856cb06b5c5a2ff5b7217584d33f2a9071ebef4b5d473"
- name = "go.opencensus.io"
- packages = [
- ".",
- "internal",
- "internal/tagencoding",
- "metric/metricdata",
- "metric/metricproducer",
- "plugin/ocgrpc",
- "plugin/ochttp",
- "plugin/ochttp/propagation/b3",
- "plugin/ochttp/propagation/tracecontext",
- "resource",
- "stats",
- "stats/internal",
- "stats/view",
- "tag",
- "trace",
- "trace/internal",
- "trace/propagation",
- "trace/tracestate",
- ]
- pruneopts = "UT"
- revision = "43463a80402d8447b7fce0d2c58edf1687ff0b58"
- version = "v0.19.3"
-
[[projects]]
branch = "master"
digest = "1:994c4915a59f821705d08ea77b117ec7a3e6a46cc867fd194d887500dac1c3c2"
@@ -241,68 +150,11 @@
"pkcs12/internal/rc2",
]
pruneopts = "UT"
- revision = "4def268fd1a49955bfb3dda92fe3db4f924f2285"
-
-[[projects]]
- branch = "master"
- digest = "1:d2aa096fe6b539afe74dd6ab8e9a160304707f1477b01c637b4bbe20d262a25c"
- name = "golang.org/x/net"
- packages = [
- "context",
- "http/httpguts",
- "http2",
- "http2/hpack",
- "idna",
- "internal/timeseries",
- "trace",
- ]
- pruneopts = "UT"
- revision = "ca1201d0de80cfde86cb01aea620983605dfe99b"
-
-[[projects]]
- branch = "master"
- digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b"
- name = "golang.org/x/sync"
- packages = ["semaphore"]
- pruneopts = "UT"
- revision = "112230192c580c3556b8cee6403af37a4fc5f28c"
-
-[[projects]]
- branch = "master"
- digest = "1:ec99dad7924bf972656818f5d62216fb987b7e077d401deb86b3c1e5e1b1d4d6"
- name = "golang.org/x/sys"
- packages = ["unix"]
- pruneopts = "UT"
- revision = "fc99dfbffb4e5ed5758a37e31dd861afe285406b"
-
-[[projects]]
- digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405"
- name = "golang.org/x/text"
- packages = [
- "collate",
- "collate/build",
- "internal/colltab",
- "internal/gen",
- "internal/language",
- "internal/language/compact",
- "internal/tag",
- "internal/triegen",
- "internal/ucd",
- "language",
- "secure/bidirule",
- "transform",
- "unicode/bidi",
- "unicode/cldr",
- "unicode/norm",
- "unicode/rangetable",
- ]
- pruneopts = "UT"
- revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
- version = "v0.3.2"
+ revision = "9756ffdc24725223350eb3266ffb92590d28f278"
[[projects]]
branch = "master"
- digest = "1:87f3ab7c6341b1be840c580bdf23dcce036916bf1859e8ab8667033a09ae6097"
+ digest = "1:5929f6d40b149acf2a65717d276ff4a2735e5b1ce13d2c68878b4a834625228e"
name = "golang.org/x/tools"
packages = [
"go/ast/astutil",
@@ -319,69 +171,7 @@
"internal/semver",
]
pruneopts = "UT"
- revision = "fc6e2057e7f6701ef9b5ef49a089bff4da7f4610"
-
-[[projects]]
- digest = "1:5f003878aabe31d7f6b842d4de32b41c46c214bb629bb485387dbcce1edf5643"
- name = "google.golang.org/api"
- packages = ["support/bundler"]
- pruneopts = "UT"
- revision = "02490b97dff7cfde1995bd77de808fd27053bc87"
- version = "v0.7.0"
-
-[[projects]]
- branch = "master"
- digest = "1:3565a93b7692277a5dea355bc47bd6315754f3246ed07a224be6aec28972a805"
- name = "google.golang.org/genproto"
- packages = [
- "googleapis/api/httpbody",
- "googleapis/rpc/status",
- "protobuf/field_mask",
- ]
- pruneopts = "UT"
- revision = "c506a9f9061087022822e8da603a52fc387115a8"
-
-[[projects]]
- digest = "1:581c9b0fe9354faf730ff231cf3682089e0b703073cf10e3976219609d27a9ea"
- name = "google.golang.org/grpc"
- packages = [
- ".",
- "balancer",
- "balancer/base",
- "balancer/roundrobin",
- "binarylog/grpc_binarylog_v1",
- "codes",
- "connectivity",
- "credentials",
- "credentials/internal",
- "encoding",
- "encoding/proto",
- "grpclog",
- "internal",
- "internal/backoff",
- "internal/balancerload",
- "internal/binarylog",
- "internal/channelz",
- "internal/envconfig",
- "internal/grpcrand",
- "internal/grpcsync",
- "internal/syscall",
- "internal/transport",
- "keepalive",
- "metadata",
- "naming",
- "peer",
- "resolver",
- "resolver/dns",
- "resolver/passthrough",
- "serviceconfig",
- "stats",
- "status",
- "tap",
- ]
- pruneopts = "UT"
- revision = "045159ad57f3781d409358e3ade910a018c16b30"
- version = "v1.22.1"
+ revision = "42f498d34c4d67145e3950193a8f23db9328e7f3"
[[projects]]
branch = "v1"
diff --git a/Gopkg.toml b/Gopkg.toml
index d418e65a1013..01b90caabc49 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -25,7 +25,7 @@
[[constraint]]
name = "github.com/Azure/go-autorest"
- version = "12.3.0"
+ version = "13.0.0"
[[constraint]]
branch = "master"
diff --git a/README.md b/README.md
index 083d60f1591f..3e6836c6b859 100644
--- a/README.md
+++ b/README.md
@@ -452,10 +452,15 @@ All packages and the runtime are instrumented using [OpenCensus](https://opencen
### Enable
-As of now, tracing is disabled by default. There are 2 ways to enable tracing:
+By default, no tracing provider will be compiled into your program, and the legacy approach of setting the `AZURE_SDK_TRACING_ENABLED` environment variable no longer has any effect.
-- set the environment variable `AZURE_SDK_TRACING_ENABLED` (_Recommended_)
-- alternatively, import the `github.com/Azure/go-autorest/tracing` package and call the `tracing.Enable()` function or `tracing.EnableWithAIForwarding()` if using the [App Insights Forwarder](https://docs.microsoft.com/en-us/azure/application-insights/opencensus-local-forwarder).
+To enable tracing, you must now add the following import to your source file.
+
+``` go
+import _ "github.com/Azure/go-autorest/tracing/opencensus"
+```
+
+To hook up a tracer, call `tracing.Register()`, passing in a type that satisfies the `tracing.Tracer` interface.
**Note**: In future major releases of the SDK, tracing may become enabled by default.
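
For illustration, here is a minimal sketch of the wiring described in the README change above: the blank import compiles the OpenCensus provider into the binary, and `tracing.Register()` swaps in a custom tracer. The `noopTracer` type is hypothetical, and its method set is an assumption about the shape of the `tracing.Tracer` interface in go-autorest v13.

``` go
package main

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/tracing"

	// Blank-importing the provider compiles OpenCensus tracing into the binary,
	// as described in the README change above.
	_ "github.com/Azure/go-autorest/tracing/opencensus"
)

// noopTracer is a hypothetical custom tracer. Its method set is an assumption
// about the tracing.Tracer interface and may differ in your go-autorest version.
type noopTracer struct{}

func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper        { return base }
func (noopTracer) StartSpan(ctx context.Context, name string) context.Context { return ctx }
func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {}

func main() {
	// Replace the default (OpenCensus) tracer with the custom one; SDK clients
	// created after this call pick it up for their HTTP pipelines.
	tracing.Register(noopTracer{})
}
```

If the default OpenCensus provider is all you need, the blank import alone is sufficient; `tracing.Register()` is only required when substituting your own tracer.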
diff --git a/profiles/2017-03-09/generate.go b/profiles/2017-03-09/generate.go
index 80cab9b2c5af..d50e4164b43e 100644
--- a/profiles/2017-03-09/generate.go
+++ b/profiles/2017-03-09/generate.go
@@ -14,4 +14,4 @@
package v20170309
-//go:generate go run ../../tools/profileBuilder/main.go list --clear-output --input ./definition.json --name 2017-03-09 --output-location ./ --verbose
+//go:generate go run ../../tools/profileBuilder/main.go list --clear-output --input ./definition.json --name 2017-03-09 --output-location ./
diff --git a/profiles/2017-03-09/resources/mgmt/resources/models.go b/profiles/2017-03-09/resources/mgmt/resources/models.go
index c791818c4e2b..94a1a8b8e728 100644
--- a/profiles/2017-03-09/resources/mgmt/resources/models.go
+++ b/profiles/2017-03-09/resources/mgmt/resources/models.go
@@ -47,6 +47,7 @@ type AliasType = original.AliasType
type BaseClient = original.BaseClient
type BasicDependency = original.BasicDependency
type Client = original.Client
+type CloudError = original.CloudError
type DebugSetting = original.DebugSetting
type Dependency = original.Dependency
type Deployment = original.Deployment
@@ -68,6 +69,8 @@ type DeploymentValidateResult = original.DeploymentValidateResult
type DeploymentsClient = original.DeploymentsClient
type DeploymentsCreateOrUpdateFuture = original.DeploymentsCreateOrUpdateFuture
type DeploymentsDeleteFuture = original.DeploymentsDeleteFuture
+type ErrorAdditionalInfo = original.ErrorAdditionalInfo
+type ErrorResponse = original.ErrorResponse
type ExportTemplateRequest = original.ExportTemplateRequest
type GenericResource = original.GenericResource
type GenericResourceFilter = original.GenericResourceFilter
@@ -108,6 +111,7 @@ type TagsListResult = original.TagsListResult
type TagsListResultIterator = original.TagsListResultIterator
type TagsListResultPage = original.TagsListResultPage
type TargetResource = original.TargetResource
+type TemplateHashResult = original.TemplateHashResult
type TemplateLink = original.TemplateLink
type UpdateFuture = original.UpdateFuture
diff --git a/profiles/2018-03-01/generate.go b/profiles/2018-03-01/generate.go
index 0281bf171246..e9b17f55a462 100644
--- a/profiles/2018-03-01/generate.go
+++ b/profiles/2018-03-01/generate.go
@@ -14,4 +14,4 @@
package v20180301
-//go:generate go run ../../tools/profileBuilder/main.go list --clear-output --input ./definition.json --name 2018-03-01 --output-location ./ --verbose
+//go:generate go run ../../tools/profileBuilder/main.go list --clear-output --input ./definition.json --name 2018-03-01 --output-location ./
diff --git a/profiles/2018-03-01/resources/mgmt/resources/models.go b/profiles/2018-03-01/resources/mgmt/resources/models.go
index ec792b7371b4..e23282499986 100644
--- a/profiles/2018-03-01/resources/mgmt/resources/models.go
+++ b/profiles/2018-03-01/resources/mgmt/resources/models.go
@@ -57,6 +57,7 @@ type AliasType = original.AliasType
type BaseClient = original.BaseClient
type BasicDependency = original.BasicDependency
type Client = original.Client
+type CloudError = original.CloudError
type CreateOrUpdateByIDFuture = original.CreateOrUpdateByIDFuture
type CreateOrUpdateFuture = original.CreateOrUpdateFuture
type DebugSetting = original.DebugSetting
@@ -82,6 +83,8 @@ type DeploymentValidateResult = original.DeploymentValidateResult
type DeploymentsClient = original.DeploymentsClient
type DeploymentsCreateOrUpdateFuture = original.DeploymentsCreateOrUpdateFuture
type DeploymentsDeleteFuture = original.DeploymentsDeleteFuture
+type ErrorAdditionalInfo = original.ErrorAdditionalInfo
+type ErrorResponse = original.ErrorResponse
type ExportTemplateRequest = original.ExportTemplateRequest
type GenericResource = original.GenericResource
type GenericResourceFilter = original.GenericResourceFilter
@@ -125,6 +128,7 @@ type TagsListResult = original.TagsListResult
type TagsListResultIterator = original.TagsListResultIterator
type TagsListResultPage = original.TagsListResultPage
type TargetResource = original.TargetResource
+type TemplateHashResult = original.TemplateHashResult
type TemplateLink = original.TemplateLink
type UpdateByIDFuture = original.UpdateByIDFuture
type UpdateFuture = original.UpdateFuture
diff --git a/profiles/2019-03-01/generate.go b/profiles/2019-03-01/generate.go
index 6adb2df6a225..b7558eecff01 100644
--- a/profiles/2019-03-01/generate.go
+++ b/profiles/2019-03-01/generate.go
@@ -14,4 +14,4 @@
package v20180301
-//go:generate go run ../../tools/profileBuilder/main.go list --clear-output --input ./definition.json --name 2019-03-01 --output-location ./ --verbose
+//go:generate go run ../../tools/profileBuilder/main.go list --clear-output --input ./definition.json --name 2019-03-01 --output-location ./
diff --git a/profiles/2019-03-01/resources/mgmt/resources/models.go b/profiles/2019-03-01/resources/mgmt/resources/models.go
index 9bce7be508a0..b6b068090beb 100644
--- a/profiles/2019-03-01/resources/mgmt/resources/models.go
+++ b/profiles/2019-03-01/resources/mgmt/resources/models.go
@@ -57,6 +57,7 @@ type AliasType = original.AliasType
type BaseClient = original.BaseClient
type BasicDependency = original.BasicDependency
type Client = original.Client
+type CloudError = original.CloudError
type CreateOrUpdateByIDFuture = original.CreateOrUpdateByIDFuture
type CreateOrUpdateFuture = original.CreateOrUpdateFuture
type DebugSetting = original.DebugSetting
@@ -84,6 +85,8 @@ type DeploymentsCreateOrUpdateAtSubscriptionScopeFuture = original.DeploymentsCr
type DeploymentsCreateOrUpdateFuture = original.DeploymentsCreateOrUpdateFuture
type DeploymentsDeleteAtSubscriptionScopeFuture = original.DeploymentsDeleteAtSubscriptionScopeFuture
type DeploymentsDeleteFuture = original.DeploymentsDeleteFuture
+type ErrorAdditionalInfo = original.ErrorAdditionalInfo
+type ErrorResponse = original.ErrorResponse
type ExportTemplateRequest = original.ExportTemplateRequest
type GenericResource = original.GenericResource
type GenericResourceFilter = original.GenericResourceFilter
@@ -134,6 +137,7 @@ type TagsListResult = original.TagsListResult
type TagsListResultIterator = original.TagsListResultIterator
type TagsListResultPage = original.TagsListResultPage
type TargetResource = original.TargetResource
+type TemplateHashResult = original.TemplateHashResult
type TemplateLink = original.TemplateLink
type UpdateByIDFuture = original.UpdateByIDFuture
type UpdateFuture = original.UpdateFuture
diff --git a/profiles/latest/appinsights/mgmt/insights/models.go b/profiles/latest/appinsights/mgmt/insights/models.go
index f6c5ca747cd8..d49dfdf12ba6 100644
--- a/profiles/latest/appinsights/mgmt/insights/models.go
+++ b/profiles/latest/appinsights/mgmt/insights/models.go
@@ -190,7 +190,6 @@ type OperationListResultIterator = original.OperationListResultIterator
type OperationListResultPage = original.OperationListResultPage
type OperationsClient = original.OperationsClient
type ProactiveDetectionConfigurationsClient = original.ProactiveDetectionConfigurationsClient
-type SetObject = original.SetObject
type TagsResource = original.TagsResource
type WebTest = original.WebTest
type WebTestGeolocation = original.WebTestGeolocation
diff --git a/profiles/latest/batch/batch/batchapi/models.go b/profiles/latest/batch/batch/batchapi/models.go
index a868766ef3b4..2454cbf25796 100644
--- a/profiles/latest/batch/batch/batchapi/models.go
+++ b/profiles/latest/batch/batch/batchapi/models.go
@@ -19,7 +19,7 @@
package batchapi
-import original "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch/batchapi"
+import original "github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch/batchapi"
type AccountClientAPI = original.AccountClientAPI
type ApplicationClientAPI = original.ApplicationClientAPI
diff --git a/profiles/latest/batch/batch/models.go b/profiles/latest/batch/batch/models.go
index 4da0853dbff3..2164f52af302 100644
--- a/profiles/latest/batch/batch/models.go
+++ b/profiles/latest/batch/batch/models.go
@@ -22,7 +22,7 @@ package batch
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
+ original "github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
)
type AccessScope = original.AccessScope
@@ -371,7 +371,10 @@ type AutoPoolSpecification = original.AutoPoolSpecification
type AutoScaleRun = original.AutoScaleRun
type AutoScaleRunError = original.AutoScaleRunError
type AutoUserSpecification = original.AutoUserSpecification
+type AzureBlobFileSystemConfiguration = original.AzureBlobFileSystemConfiguration
+type AzureFileShareConfiguration = original.AzureFileShareConfiguration
type BaseClient = original.BaseClient
+type CIFSMountConfiguration = original.CIFSMountConfiguration
type Certificate = original.Certificate
type CertificateAddParameter = original.CertificateAddParameter
type CertificateClient = original.CertificateClient
@@ -454,7 +457,9 @@ type JobTerminateParameter = original.JobTerminateParameter
type JobUpdateParameter = original.JobUpdateParameter
type LinuxUserConfiguration = original.LinuxUserConfiguration
type MetadataItem = original.MetadataItem
+type MountConfiguration = original.MountConfiguration
type MultiInstanceSettings = original.MultiInstanceSettings
+type NFSMountConfiguration = original.NFSMountConfiguration
type NameValuePair = original.NameValuePair
type NetworkConfiguration = original.NetworkConfiguration
type NetworkSecurityGroupRule = original.NetworkSecurityGroupRule
diff --git a/profiles/latest/batch/mgmt/batch/batchapi/models.go b/profiles/latest/batch/mgmt/batch/batchapi/models.go
index d3cd8550e0ef..0a26813bd53e 100644
--- a/profiles/latest/batch/mgmt/batch/batchapi/models.go
+++ b/profiles/latest/batch/mgmt/batch/batchapi/models.go
@@ -19,7 +19,7 @@
package batchapi
-import original "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-04-01/batch/batchapi"
+import original "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch/batchapi"
type AccountClientAPI = original.AccountClientAPI
type ApplicationClientAPI = original.ApplicationClientAPI
diff --git a/profiles/latest/batch/mgmt/batch/models.go b/profiles/latest/batch/mgmt/batch/models.go
index 61608f7ddba9..381adf56f262 100644
--- a/profiles/latest/batch/mgmt/batch/models.go
+++ b/profiles/latest/batch/mgmt/batch/models.go
@@ -22,7 +22,7 @@ package batch
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-04-01/batch"
+ original "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch"
)
const (
@@ -105,6 +105,13 @@ const (
Spread ComputeNodeFillType = original.Spread
)
+type ContainerWorkingDirectory = original.ContainerWorkingDirectory
+
+const (
+ ContainerImageDefault ContainerWorkingDirectory = original.ContainerImageDefault
+ TaskWorkingDirectory ContainerWorkingDirectory = original.TaskWorkingDirectory
+)
+
type ElevationLevel = original.ElevationLevel
const (
@@ -214,7 +221,10 @@ type AutoScaleSettings = original.AutoScaleSettings
type AutoStorageBaseProperties = original.AutoStorageBaseProperties
type AutoStorageProperties = original.AutoStorageProperties
type AutoUserSpecification = original.AutoUserSpecification
+type AzureBlobFileSystemConfiguration = original.AzureBlobFileSystemConfiguration
+type AzureFileShareConfiguration = original.AzureFileShareConfiguration
type BaseClient = original.BaseClient
+type CIFSMountConfiguration = original.CIFSMountConfiguration
type Certificate = original.Certificate
type CertificateBaseProperties = original.CertificateBaseProperties
type CertificateClient = original.CertificateClient
@@ -255,6 +265,8 @@ type ListPoolsResultPage = original.ListPoolsResultPage
type LocationClient = original.LocationClient
type LocationQuota = original.LocationQuota
type MetadataItem = original.MetadataItem
+type MountConfiguration = original.MountConfiguration
+type NFSMountConfiguration = original.NFSMountConfiguration
type NetworkConfiguration = original.NetworkConfiguration
type NetworkSecurityGroupRule = original.NetworkSecurityGroupRule
type Operation = original.Operation
@@ -399,6 +411,9 @@ func PossibleComputeNodeDeallocationOptionValues() []ComputeNodeDeallocationOpti
func PossibleComputeNodeFillTypeValues() []ComputeNodeFillType {
return original.PossibleComputeNodeFillTypeValues()
}
+func PossibleContainerWorkingDirectoryValues() []ContainerWorkingDirectory {
+ return original.PossibleContainerWorkingDirectoryValues()
+}
func PossibleElevationLevelValues() []ElevationLevel {
return original.PossibleElevationLevelValues()
}
diff --git a/profiles/latest/cognitiveservices/computervision/computervisionapi/models.go b/profiles/latest/cognitiveservices/computervision/computervisionapi/models.go
index f6a25fe21a6e..27a1ed2e9e90 100644
--- a/profiles/latest/cognitiveservices/computervision/computervisionapi/models.go
+++ b/profiles/latest/cognitiveservices/computervision/computervisionapi/models.go
@@ -19,6 +19,6 @@
package computervisionapi
-import original "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.0/computervision/computervisionapi"
+import original "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.1/computervision/computervisionapi"
type BaseClientAPI = original.BaseClientAPI
diff --git a/profiles/latest/cognitiveservices/computervision/models.go b/profiles/latest/cognitiveservices/computervision/models.go
index ee73365606e8..40353f14c743 100644
--- a/profiles/latest/cognitiveservices/computervision/models.go
+++ b/profiles/latest/cognitiveservices/computervision/models.go
@@ -19,13 +19,20 @@
package computervision
-import original "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.0/computervision"
+import original "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.1/computervision"
+
+type DescriptionExclude = original.DescriptionExclude
+
+const (
+ Celebrities DescriptionExclude = original.Celebrities
+ Landmarks DescriptionExclude = original.Landmarks
+)
type Details = original.Details
const (
- Celebrities Details = original.Celebrities
- Landmarks Details = original.Landmarks
+ DetailsCelebrities Details = original.DetailsCelebrities
+ DetailsLandmarks Details = original.DetailsLandmarks
)
type Gender = original.Gender
@@ -158,6 +165,9 @@ func New(endpoint string) BaseClient {
func NewWithoutDefaults(endpoint string) BaseClient {
return original.NewWithoutDefaults(endpoint)
}
+func PossibleDescriptionExcludeValues() []DescriptionExclude {
+ return original.PossibleDescriptionExcludeValues()
+}
func PossibleDetailsValues() []Details {
return original.PossibleDetailsValues()
}
diff --git a/profiles/latest/cognitiveservices/qnamaker/models.go b/profiles/latest/cognitiveservices/qnamaker/models.go
index fefe9d56eef0..2b7767ec320d 100644
--- a/profiles/latest/cognitiveservices/qnamaker/models.go
+++ b/profiles/latest/cognitiveservices/qnamaker/models.go
@@ -47,13 +47,6 @@ const (
ValidationFailure ErrorCodeType = original.ValidationFailure
)
-type KnowledgebaseEnvironmentType = original.KnowledgebaseEnvironmentType
-
-const (
- KnowledgebaseEnvironmentTypeProd KnowledgebaseEnvironmentType = original.KnowledgebaseEnvironmentTypeProd
- KnowledgebaseEnvironmentTypeTest KnowledgebaseEnvironmentType = original.KnowledgebaseEnvironmentTypeTest
-)
-
type OperationStateType = original.OperationStateType
const (
@@ -135,9 +128,6 @@ func PossibleEnvironmentTypeValues() []EnvironmentType {
func PossibleErrorCodeTypeValues() []ErrorCodeType {
return original.PossibleErrorCodeTypeValues()
}
-func PossibleKnowledgebaseEnvironmentTypeValues() []KnowledgebaseEnvironmentType {
- return original.PossibleKnowledgebaseEnvironmentTypeValues()
-}
func PossibleOperationStateTypeValues() []OperationStateType {
return original.PossibleOperationStateTypeValues()
}
diff --git a/profiles/latest/compute/mgmt/compute/computeapi/models.go b/profiles/latest/compute/mgmt/compute/computeapi/models.go
index 26b46b42c4b6..dcae23d3b907 100644
--- a/profiles/latest/compute/mgmt/compute/computeapi/models.go
+++ b/profiles/latest/compute/mgmt/compute/computeapi/models.go
@@ -27,6 +27,8 @@ type DedicatedHostGroupsClientAPI = original.DedicatedHostGroupsClientAPI
type DedicatedHostsClientAPI = original.DedicatedHostsClientAPI
type DisksClientAPI = original.DisksClientAPI
type GalleriesClientAPI = original.GalleriesClientAPI
+type GalleryApplicationVersionsClientAPI = original.GalleryApplicationVersionsClientAPI
+type GalleryApplicationsClientAPI = original.GalleryApplicationsClientAPI
type GalleryImageVersionsClientAPI = original.GalleryImageVersionsClientAPI
type GalleryImagesClientAPI = original.GalleryImagesClientAPI
type ImagesClientAPI = original.ImagesClientAPI
diff --git a/profiles/latest/compute/mgmt/compute/models.go b/profiles/latest/compute/mgmt/compute/models.go
index f7c07e190175..8bafe939394d 100644
--- a/profiles/latest/compute/mgmt/compute/models.go
+++ b/profiles/latest/compute/mgmt/compute/models.go
@@ -302,6 +302,17 @@ const (
ProvisioningState2Updating ProvisioningState2 = original.ProvisioningState2Updating
)
+type ProvisioningState3 = original.ProvisioningState3
+
+const (
+ ProvisioningState3Creating ProvisioningState3 = original.ProvisioningState3Creating
+ ProvisioningState3Deleting ProvisioningState3 = original.ProvisioningState3Deleting
+ ProvisioningState3Failed ProvisioningState3 = original.ProvisioningState3Failed
+ ProvisioningState3Migrating ProvisioningState3 = original.ProvisioningState3Migrating
+ ProvisioningState3Succeeded ProvisioningState3 = original.ProvisioningState3Succeeded
+ ProvisioningState3Updating ProvisioningState3 = original.ProvisioningState3Updating
+)
+
type ProximityPlacementGroupType = original.ProximityPlacementGroupType
const (
@@ -643,6 +654,7 @@ type AvailabilitySetProperties = original.AvailabilitySetProperties
type AvailabilitySetUpdate = original.AvailabilitySetUpdate
type AvailabilitySetsClient = original.AvailabilitySetsClient
type BaseClient = original.BaseClient
+type BillingProfile = original.BillingProfile
type BootDiagnostics = original.BootDiagnostics
type BootDiagnosticsInstanceView = original.BootDiagnosticsInstanceView
type CloudError = original.CloudError
@@ -713,7 +725,25 @@ type GalleriesClient = original.GalleriesClient
type GalleriesCreateOrUpdateFuture = original.GalleriesCreateOrUpdateFuture
type GalleriesDeleteFuture = original.GalleriesDeleteFuture
type Gallery = original.Gallery
+type GalleryApplication = original.GalleryApplication
+type GalleryApplicationList = original.GalleryApplicationList
+type GalleryApplicationListIterator = original.GalleryApplicationListIterator
+type GalleryApplicationListPage = original.GalleryApplicationListPage
+type GalleryApplicationProperties = original.GalleryApplicationProperties
+type GalleryApplicationVersion = original.GalleryApplicationVersion
+type GalleryApplicationVersionList = original.GalleryApplicationVersionList
+type GalleryApplicationVersionListIterator = original.GalleryApplicationVersionListIterator
+type GalleryApplicationVersionListPage = original.GalleryApplicationVersionListPage
+type GalleryApplicationVersionProperties = original.GalleryApplicationVersionProperties
+type GalleryApplicationVersionPublishingProfile = original.GalleryApplicationVersionPublishingProfile
+type GalleryApplicationVersionsClient = original.GalleryApplicationVersionsClient
+type GalleryApplicationVersionsCreateOrUpdateFuture = original.GalleryApplicationVersionsCreateOrUpdateFuture
+type GalleryApplicationVersionsDeleteFuture = original.GalleryApplicationVersionsDeleteFuture
+type GalleryApplicationsClient = original.GalleryApplicationsClient
+type GalleryApplicationsCreateOrUpdateFuture = original.GalleryApplicationsCreateOrUpdateFuture
+type GalleryApplicationsDeleteFuture = original.GalleryApplicationsDeleteFuture
type GalleryArtifactPublishingProfileBase = original.GalleryArtifactPublishingProfileBase
+type GalleryArtifactSource = original.GalleryArtifactSource
type GalleryArtifactVersionSource = original.GalleryArtifactVersionSource
type GalleryDataDiskImage = original.GalleryDataDiskImage
type GalleryDiskImage = original.GalleryDiskImage
@@ -779,6 +809,7 @@ type LogAnalyticsInputBase = original.LogAnalyticsInputBase
type LogAnalyticsOperationResult = original.LogAnalyticsOperationResult
type LogAnalyticsOutput = original.LogAnalyticsOutput
type MaintenanceRedeployStatus = original.MaintenanceRedeployStatus
+type ManagedArtifact = original.ManagedArtifact
type ManagedDiskParameters = original.ManagedDiskParameters
type NetworkInterfaceReference = original.NetworkInterfaceReference
type NetworkInterfaceReferenceProperties = original.NetworkInterfaceReferenceProperties
@@ -835,6 +866,7 @@ type RunCommandParameterDefinition = original.RunCommandParameterDefinition
type RunCommandResult = original.RunCommandResult
type SSHConfiguration = original.SSHConfiguration
type SSHPublicKey = original.SSHPublicKey
+type ScheduledEventsProfile = original.ScheduledEventsProfile
type Sku = original.Sku
type Snapshot = original.Snapshot
type SnapshotList = original.SnapshotList
@@ -855,6 +887,7 @@ type StorageProfile = original.StorageProfile
type SubResource = original.SubResource
type SubResourceReadOnly = original.SubResourceReadOnly
type TargetRegion = original.TargetRegion
+type TerminateNotificationProfile = original.TerminateNotificationProfile
type ThrottledRequestsInput = original.ThrottledRequestsInput
type UpdateResource = original.UpdateResource
type UpgradeOperationHistoricalStatusInfo = original.UpgradeOperationHistoricalStatusInfo
@@ -864,6 +897,7 @@ type UpgradePolicy = original.UpgradePolicy
type Usage = original.Usage
type UsageClient = original.UsageClient
type UsageName = original.UsageName
+type UserArtifactSource = original.UserArtifactSource
type VMScaleSetConvertToSinglePlacementGroupInput = original.VMScaleSetConvertToSinglePlacementGroupInput
type VaultCertificate = original.VaultCertificate
type VaultSecretGroup = original.VaultSecretGroup
@@ -1093,6 +1127,30 @@ func NewGalleriesClient(subscriptionID string) GalleriesClient {
func NewGalleriesClientWithBaseURI(baseURI string, subscriptionID string) GalleriesClient {
return original.NewGalleriesClientWithBaseURI(baseURI, subscriptionID)
}
+func NewGalleryApplicationListIterator(page GalleryApplicationListPage) GalleryApplicationListIterator {
+ return original.NewGalleryApplicationListIterator(page)
+}
+func NewGalleryApplicationListPage(getNextPage func(context.Context, GalleryApplicationList) (GalleryApplicationList, error)) GalleryApplicationListPage {
+ return original.NewGalleryApplicationListPage(getNextPage)
+}
+func NewGalleryApplicationVersionListIterator(page GalleryApplicationVersionListPage) GalleryApplicationVersionListIterator {
+ return original.NewGalleryApplicationVersionListIterator(page)
+}
+func NewGalleryApplicationVersionListPage(getNextPage func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error)) GalleryApplicationVersionListPage {
+ return original.NewGalleryApplicationVersionListPage(getNextPage)
+}
+func NewGalleryApplicationVersionsClient(subscriptionID string) GalleryApplicationVersionsClient {
+ return original.NewGalleryApplicationVersionsClient(subscriptionID)
+}
+func NewGalleryApplicationVersionsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationVersionsClient {
+ return original.NewGalleryApplicationVersionsClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewGalleryApplicationsClient(subscriptionID string) GalleryApplicationsClient {
+ return original.NewGalleryApplicationsClient(subscriptionID)
+}
+func NewGalleryApplicationsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationsClient {
+ return original.NewGalleryApplicationsClientWithBaseURI(baseURI, subscriptionID)
+}
func NewGalleryImageListIterator(page GalleryImageListPage) GalleryImageListIterator {
return original.NewGalleryImageListIterator(page)
}
@@ -1387,6 +1445,9 @@ func PossibleProvisioningState1Values() []ProvisioningState1 {
func PossibleProvisioningState2Values() []ProvisioningState2 {
return original.PossibleProvisioningState2Values()
}
+func PossibleProvisioningState3Values() []ProvisioningState3 {
+ return original.PossibleProvisioningState3Values()
+}
func PossibleProvisioningStateValues() []ProvisioningState {
return original.PossibleProvisioningStateValues()
}
diff --git a/profiles/latest/containerservice/mgmt/containerservice/containerserviceapi/models.go b/profiles/latest/containerservice/mgmt/containerservice/containerserviceapi/models.go
index fbc727a11a46..2529a9c3888f 100644
--- a/profiles/latest/containerservice/mgmt/containerservice/containerserviceapi/models.go
+++ b/profiles/latest/containerservice/mgmt/containerservice/containerserviceapi/models.go
@@ -19,7 +19,7 @@
package containerserviceapi
-import original "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-06-01/containerservice/containerserviceapi"
+import original "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice/containerserviceapi"
type AgentPoolsClientAPI = original.AgentPoolsClientAPI
type ContainerServicesClientAPI = original.ContainerServicesClientAPI
diff --git a/profiles/latest/containerservice/mgmt/containerservice/models.go b/profiles/latest/containerservice/mgmt/containerservice/models.go
index 514f6a9268c8..2be679086fb2 100644
--- a/profiles/latest/containerservice/mgmt/containerservice/models.go
+++ b/profiles/latest/containerservice/mgmt/containerservice/models.go
@@ -22,7 +22,7 @@ package containerservice
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-06-01/containerservice"
+ original "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
)
const (
@@ -367,6 +367,7 @@ type ListResultIterator = original.ListResultIterator
type ListResultPage = original.ListResultPage
type ManagedCluster = original.ManagedCluster
type ManagedClusterAADProfile = original.ManagedClusterAADProfile
+type ManagedClusterAPIServerAccessProfile = original.ManagedClusterAPIServerAccessProfile
type ManagedClusterAccessProfile = original.ManagedClusterAccessProfile
type ManagedClusterAddonProfile = original.ManagedClusterAddonProfile
type ManagedClusterAgentPoolProfile = original.ManagedClusterAgentPoolProfile
@@ -375,6 +376,10 @@ type ManagedClusterIdentity = original.ManagedClusterIdentity
type ManagedClusterListResult = original.ManagedClusterListResult
type ManagedClusterListResultIterator = original.ManagedClusterListResultIterator
type ManagedClusterListResultPage = original.ManagedClusterListResultPage
+type ManagedClusterLoadBalancerProfile = original.ManagedClusterLoadBalancerProfile
+type ManagedClusterLoadBalancerProfileManagedOutboundIPs = original.ManagedClusterLoadBalancerProfileManagedOutboundIPs
+type ManagedClusterLoadBalancerProfileOutboundIPPrefixes = original.ManagedClusterLoadBalancerProfileOutboundIPPrefixes
+type ManagedClusterLoadBalancerProfileOutboundIPs = original.ManagedClusterLoadBalancerProfileOutboundIPs
type ManagedClusterPoolUpgradeProfile = original.ManagedClusterPoolUpgradeProfile
type ManagedClusterPoolUpgradeProfileUpgradesItem = original.ManagedClusterPoolUpgradeProfileUpgradesItem
type ManagedClusterProperties = original.ManagedClusterProperties
@@ -419,6 +424,7 @@ type OrchestratorVersionProfileProperties = original.OrchestratorVersionProfileP
type Properties = original.Properties
type PurchasePlan = original.PurchasePlan
type Resource = original.Resource
+type ResourceReference = original.ResourceReference
type SSHConfiguration = original.SSHConfiguration
type SSHPublicKey = original.SSHPublicKey
type ServicePrincipalProfile = original.ServicePrincipalProfile
diff --git a/profiles/latest/datafactory/mgmt/datafactory/models.go b/profiles/latest/datafactory/mgmt/datafactory/models.go
index d33ae6666569..6a2a948586a1 100644
--- a/profiles/latest/datafactory/mgmt/datafactory/models.go
+++ b/profiles/latest/datafactory/mgmt/datafactory/models.go
@@ -46,6 +46,16 @@ const (
AuthorizationTypeRBAC AuthorizationType = original.AuthorizationTypeRBAC
)
+type AvroCompressionCodec = original.AvroCompressionCodec
+
+const (
+ Bzip2 AvroCompressionCodec = original.Bzip2
+ Deflate AvroCompressionCodec = original.Deflate
+ None AvroCompressionCodec = original.None
+ Snappy AvroCompressionCodec = original.Snappy
+ Xz AvroCompressionCodec = original.Xz
+)
+
type AzureFunctionActivityMethod = original.AzureFunctionActivityMethod
const (
@@ -140,6 +150,16 @@ const (
OnPremisesWithIfd DynamicsDeploymentType = original.OnPremisesWithIfd
)
+type EventSubscriptionStatus = original.EventSubscriptionStatus
+
+const (
+ Deprovisioning EventSubscriptionStatus = original.Deprovisioning
+ Disabled EventSubscriptionStatus = original.Disabled
+ Enabled EventSubscriptionStatus = original.Enabled
+ Provisioning EventSubscriptionStatus = original.Provisioning
+ Unknown EventSubscriptionStatus = original.Unknown
+)
+
type FtpAuthenticationType = original.FtpAuthenticationType
const (
@@ -171,9 +191,9 @@ const (
type HDInsightActivityDebugInfoOption = original.HDInsightActivityDebugInfoOption
const (
- Always HDInsightActivityDebugInfoOption = original.Always
- Failure HDInsightActivityDebugInfoOption = original.Failure
- None HDInsightActivityDebugInfoOption = original.None
+ HDInsightActivityDebugInfoOptionAlways HDInsightActivityDebugInfoOption = original.HDInsightActivityDebugInfoOptionAlways
+ HDInsightActivityDebugInfoOptionFailure HDInsightActivityDebugInfoOption = original.HDInsightActivityDebugInfoOptionFailure
+ HDInsightActivityDebugInfoOptionNone HDInsightActivityDebugInfoOption = original.HDInsightActivityDebugInfoOptionNone
)
type HTTPAuthenticationType = original.HTTPAuthenticationType
@@ -301,6 +321,13 @@ const (
IntegrationRuntimeUpdateResultSucceed IntegrationRuntimeUpdateResult = original.IntegrationRuntimeUpdateResultSucceed
)
+type JSONWriteFilePattern = original.JSONWriteFilePattern
+
+const (
+ ArrayOfObjects JSONWriteFilePattern = original.ArrayOfObjects
+ SetOfObjects JSONWriteFilePattern = original.SetOfObjects
+)
+
type ManagedIntegrationRuntimeNodeStatus = original.ManagedIntegrationRuntimeNodeStatus
const (
@@ -603,9 +630,9 @@ const (
type TriggerRuntimeState = original.TriggerRuntimeState
const (
- Disabled TriggerRuntimeState = original.Disabled
- Started TriggerRuntimeState = original.Started
- Stopped TriggerRuntimeState = original.Stopped
+ TriggerRuntimeStateDisabled TriggerRuntimeState = original.TriggerRuntimeStateDisabled
+ TriggerRuntimeStateStarted TriggerRuntimeState = original.TriggerRuntimeStateStarted
+ TriggerRuntimeStateStopped TriggerRuntimeState = original.TriggerRuntimeStateStopped
)
type TumblingWindowFrequency = original.TumblingWindowFrequency
@@ -665,9 +692,11 @@ const (
type TypeBasicCopySink = original.TypeBasicCopySink
const (
+ TypeAvroSink TypeBasicCopySink = original.TypeAvroSink
TypeAzureBlobFSSink TypeBasicCopySink = original.TypeAzureBlobFSSink
TypeAzureDataExplorerSink TypeBasicCopySink = original.TypeAzureDataExplorerSink
TypeAzureDataLakeStoreSink TypeBasicCopySink = original.TypeAzureDataLakeStoreSink
+ TypeAzureMySQLSink TypeBasicCopySink = original.TypeAzureMySQLSink
TypeAzurePostgreSQLSink TypeBasicCopySink = original.TypeAzurePostgreSQLSink
TypeAzureQueueSink TypeBasicCopySink = original.TypeAzureQueueSink
TypeAzureSearchIndexSink TypeBasicCopySink = original.TypeAzureSearchIndexSink
@@ -684,6 +713,7 @@ const (
TypeDynamicsSink TypeBasicCopySink = original.TypeDynamicsSink
TypeFileSystemSink TypeBasicCopySink = original.TypeFileSystemSink
TypeInformixSink TypeBasicCopySink = original.TypeInformixSink
+ TypeJSONSink TypeBasicCopySink = original.TypeJSONSink
TypeMicrosoftAccessSink TypeBasicCopySink = original.TypeMicrosoftAccessSink
TypeOdbcSink TypeBasicCopySink = original.TypeOdbcSink
TypeOracleSink TypeBasicCopySink = original.TypeOracleSink
@@ -702,9 +732,11 @@ type TypeBasicCopySource = original.TypeBasicCopySource
const (
TypeAmazonMWSSource TypeBasicCopySource = original.TypeAmazonMWSSource
TypeAmazonRedshiftSource TypeBasicCopySource = original.TypeAmazonRedshiftSource
+ TypeAvroSource TypeBasicCopySource = original.TypeAvroSource
TypeAzureBlobFSSource TypeBasicCopySource = original.TypeAzureBlobFSSource
TypeAzureDataExplorerSource TypeBasicCopySource = original.TypeAzureDataExplorerSource
TypeAzureDataLakeStoreSource TypeBasicCopySource = original.TypeAzureDataLakeStoreSource
+ TypeAzureMariaDBSource TypeBasicCopySource = original.TypeAzureMariaDBSource
TypeAzureMySQLSource TypeBasicCopySource = original.TypeAzureMySQLSource
TypeAzurePostgreSQLSource TypeBasicCopySource = original.TypeAzurePostgreSQLSource
TypeAzureSQLSource TypeBasicCopySource = original.TypeAzureSQLSource
@@ -737,6 +769,7 @@ const (
TypeImpalaSource TypeBasicCopySource = original.TypeImpalaSource
TypeInformixSource TypeBasicCopySource = original.TypeInformixSource
TypeJiraSource TypeBasicCopySource = original.TypeJiraSource
+ TypeJSONSource TypeBasicCopySource = original.TypeJSONSource
TypeMagentoSource TypeBasicCopySource = original.TypeMagentoSource
TypeMariaDBSource TypeBasicCopySource = original.TypeMariaDBSource
TypeMarketoSource TypeBasicCopySource = original.TypeMarketoSource
@@ -762,6 +795,7 @@ const (
TypeSalesforceMarketingCloudSource TypeBasicCopySource = original.TypeSalesforceMarketingCloudSource
TypeSalesforceServiceCloudSource TypeBasicCopySource = original.TypeSalesforceServiceCloudSource
TypeSalesforceSource TypeBasicCopySource = original.TypeSalesforceSource
+ TypeSapBwSource TypeBasicCopySource = original.TypeSapBwSource
TypeSapCloudForCustomerSource TypeBasicCopySource = original.TypeSapCloudForCustomerSource
TypeSapEccSource TypeBasicCopySource = original.TypeSapEccSource
TypeSapHanaSource TypeBasicCopySource = original.TypeSapHanaSource
@@ -787,11 +821,14 @@ type TypeBasicDataset = original.TypeBasicDataset
const (
TypeAmazonMWSObject TypeBasicDataset = original.TypeAmazonMWSObject
+ TypeAmazonRedshiftTable TypeBasicDataset = original.TypeAmazonRedshiftTable
TypeAmazonS3Object TypeBasicDataset = original.TypeAmazonS3Object
+ TypeAvro TypeBasicDataset = original.TypeAvro
TypeAzureBlob TypeBasicDataset = original.TypeAzureBlob
TypeAzureBlobFSFile TypeBasicDataset = original.TypeAzureBlobFSFile
TypeAzureDataExplorerTable TypeBasicDataset = original.TypeAzureDataExplorerTable
TypeAzureDataLakeStoreFile TypeBasicDataset = original.TypeAzureDataLakeStoreFile
+ TypeAzureMariaDBTable TypeBasicDataset = original.TypeAzureMariaDBTable
TypeAzureMySQLTable TypeBasicDataset = original.TypeAzureMySQLTable
TypeAzurePostgreSQLTable TypeBasicDataset = original.TypeAzurePostgreSQLTable
TypeAzureSearchIndex TypeBasicDataset = original.TypeAzureSearchIndex
@@ -807,6 +844,7 @@ const (
TypeCouchbaseTable TypeBasicDataset = original.TypeCouchbaseTable
TypeCustomDataset TypeBasicDataset = original.TypeCustomDataset
TypeDataset TypeBasicDataset = original.TypeDataset
+ TypeDb2Table TypeBasicDataset = original.TypeDb2Table
TypeDelimitedText TypeBasicDataset = original.TypeDelimitedText
TypeDocumentDbCollection TypeBasicDataset = original.TypeDocumentDbCollection
TypeDrillTable TypeBasicDataset = original.TypeDrillTable
@@ -825,6 +863,7 @@ const (
TypeImpalaObject TypeBasicDataset = original.TypeImpalaObject
TypeInformixTable TypeBasicDataset = original.TypeInformixTable
TypeJiraObject TypeBasicDataset = original.TypeJiraObject
+ TypeJSON TypeBasicDataset = original.TypeJSON
TypeMagentoObject TypeBasicDataset = original.TypeMagentoObject
TypeMariaDBTable TypeBasicDataset = original.TypeMariaDBTable
TypeMarketoObject TypeBasicDataset = original.TypeMarketoObject
@@ -850,6 +889,7 @@ const (
TypeSalesforceMarketingCloudObject TypeBasicDataset = original.TypeSalesforceMarketingCloudObject
TypeSalesforceObject TypeBasicDataset = original.TypeSalesforceObject
TypeSalesforceServiceCloudObject TypeBasicDataset = original.TypeSalesforceServiceCloudObject
+ TypeSapBwCube TypeBasicDataset = original.TypeSapBwCube
TypeSapCloudForCustomerResource TypeBasicDataset = original.TypeSapCloudForCustomerResource
TypeSapEccResource TypeBasicDataset = original.TypeSapEccResource
TypeSapHanaTable TypeBasicDataset = original.TypeSapHanaTable
@@ -860,6 +900,7 @@ const (
TypeSparkObject TypeBasicDataset = original.TypeSparkObject
TypeSQLServerTable TypeBasicDataset = original.TypeSQLServerTable
TypeSquareObject TypeBasicDataset = original.TypeSquareObject
+ TypeSybaseTable TypeBasicDataset = original.TypeSybaseTable
TypeTeradataTable TypeBasicDataset = original.TypeTeradataTable
TypeVerticaTable TypeBasicDataset = original.TypeVerticaTable
TypeWebTable TypeBasicDataset = original.TypeWebTable
@@ -936,6 +977,7 @@ const (
TypeAzureDataLakeStore TypeBasicLinkedService = original.TypeAzureDataLakeStore
TypeAzureFunction TypeBasicLinkedService = original.TypeAzureFunction
TypeAzureKeyVault TypeBasicLinkedService = original.TypeAzureKeyVault
+ TypeAzureMariaDB TypeBasicLinkedService = original.TypeAzureMariaDB
TypeAzureML TypeBasicLinkedService = original.TypeAzureML
TypeAzureMySQL TypeBasicLinkedService = original.TypeAzureMySQL
TypeAzurePostgreSQL TypeBasicLinkedService = original.TypeAzurePostgreSQL
@@ -1027,6 +1069,16 @@ const (
TypeSsisObjectMetadata TypeBasicSsisObjectMetadata = original.TypeSsisObjectMetadata
)
+type TypeBasicStoreWriteSettings = original.TypeBasicStoreWriteSettings
+
+const (
+ TypeAzureBlobFSWriteSettings TypeBasicStoreWriteSettings = original.TypeAzureBlobFSWriteSettings
+ TypeAzureBlobStorageWriteSettings TypeBasicStoreWriteSettings = original.TypeAzureBlobStorageWriteSettings
+ TypeAzureDataLakeStoreWriteSettings TypeBasicStoreWriteSettings = original.TypeAzureDataLakeStoreWriteSettings
+ TypeFileServerWriteSettings TypeBasicStoreWriteSettings = original.TypeFileServerWriteSettings
+ TypeStoreWriteSettings TypeBasicStoreWriteSettings = original.TypeStoreWriteSettings
+)
+
type TypeBasicTrigger = original.TypeBasicTrigger
const (
@@ -1076,6 +1128,8 @@ type AmazonMWSSource = original.AmazonMWSSource
type AmazonRedshiftLinkedService = original.AmazonRedshiftLinkedService
type AmazonRedshiftLinkedServiceTypeProperties = original.AmazonRedshiftLinkedServiceTypeProperties
type AmazonRedshiftSource = original.AmazonRedshiftSource
+type AmazonRedshiftTableDataset = original.AmazonRedshiftTableDataset
+type AmazonRedshiftTableDatasetTypeProperties = original.AmazonRedshiftTableDatasetTypeProperties
type AmazonS3Dataset = original.AmazonS3Dataset
type AmazonS3DatasetTypeProperties = original.AmazonS3DatasetTypeProperties
type AmazonS3LinkedService = original.AmazonS3LinkedService
@@ -1084,7 +1138,12 @@ type AmazonS3Location = original.AmazonS3Location
type AmazonS3ReadSettings = original.AmazonS3ReadSettings
type AppendVariableActivity = original.AppendVariableActivity
type AppendVariableActivityTypeProperties = original.AppendVariableActivityTypeProperties
+type AvroDataset = original.AvroDataset
+type AvroDatasetTypeProperties = original.AvroDatasetTypeProperties
type AvroFormat = original.AvroFormat
+type AvroSink = original.AvroSink
+type AvroSource = original.AvroSource
+type AvroWriteSettings = original.AvroWriteSettings
type AzureBatchLinkedService = original.AzureBatchLinkedService
type AzureBatchLinkedServiceTypeProperties = original.AzureBatchLinkedServiceTypeProperties
type AzureBlobDataset = original.AzureBlobDataset
@@ -1138,8 +1197,13 @@ type AzureMLLinkedServiceTypeProperties = original.AzureMLLinkedServiceTypePrope
type AzureMLUpdateResourceActivity = original.AzureMLUpdateResourceActivity
type AzureMLUpdateResourceActivityTypeProperties = original.AzureMLUpdateResourceActivityTypeProperties
type AzureMLWebServiceFile = original.AzureMLWebServiceFile
+type AzureMariaDBLinkedService = original.AzureMariaDBLinkedService
+type AzureMariaDBLinkedServiceTypeProperties = original.AzureMariaDBLinkedServiceTypeProperties
+type AzureMariaDBSource = original.AzureMariaDBSource
+type AzureMariaDBTableDataset = original.AzureMariaDBTableDataset
type AzureMySQLLinkedService = original.AzureMySQLLinkedService
type AzureMySQLLinkedServiceTypeProperties = original.AzureMySQLLinkedServiceTypeProperties
+type AzureMySQLSink = original.AzureMySQLSink
type AzureMySQLSource = original.AzureMySQLSource
type AzureMySQLTableDataset = original.AzureMySQLTableDataset
type AzureMySQLTableDatasetTypeProperties = original.AzureMySQLTableDatasetTypeProperties
@@ -1194,6 +1258,7 @@ type BasicLinkedService = original.BasicLinkedService
type BasicMultiplePipelineTrigger = original.BasicMultiplePipelineTrigger
type BasicSecretBase = original.BasicSecretBase
type BasicSsisObjectMetadata = original.BasicSsisObjectMetadata
+type BasicStoreWriteSettings = original.BasicStoreWriteSettings
type BasicTrigger = original.BasicTrigger
type BasicTriggerDependencyReference = original.BasicTriggerDependencyReference
type BasicWebLinkedServiceTypeProperties = original.BasicWebLinkedServiceTypeProperties
@@ -1274,6 +1339,8 @@ type DatasetsClient = original.DatasetsClient
type Db2LinkedService = original.Db2LinkedService
type Db2LinkedServiceTypeProperties = original.Db2LinkedServiceTypeProperties
type Db2Source = original.Db2Source
+type Db2TableDataset = original.Db2TableDataset
+type Db2TableDatasetTypeProperties = original.Db2TableDatasetTypeProperties
type DeleteActivity = original.DeleteActivity
type DeleteActivityTypeProperties = original.DeleteActivityTypeProperties
type DelimitedTextDataset = original.DelimitedTextDataset
@@ -1288,6 +1355,7 @@ type DocumentDbCollectionDataset = original.DocumentDbCollectionDataset
type DocumentDbCollectionDatasetTypeProperties = original.DocumentDbCollectionDatasetTypeProperties
type DocumentDbCollectionSink = original.DocumentDbCollectionSink
type DocumentDbCollectionSource = original.DocumentDbCollectionSource
+type DrillDatasetTypeProperties = original.DrillDatasetTypeProperties
type DrillLinkedService = original.DrillLinkedService
type DrillLinkedServiceTypeProperties = original.DrillLinkedServiceTypeProperties
type DrillSource = original.DrillSource
@@ -1364,10 +1432,12 @@ type GoogleAdWordsLinkedService = original.GoogleAdWordsLinkedService
type GoogleAdWordsLinkedServiceTypeProperties = original.GoogleAdWordsLinkedServiceTypeProperties
type GoogleAdWordsObjectDataset = original.GoogleAdWordsObjectDataset
type GoogleAdWordsSource = original.GoogleAdWordsSource
+type GoogleBigQueryDatasetTypeProperties = original.GoogleBigQueryDatasetTypeProperties
type GoogleBigQueryLinkedService = original.GoogleBigQueryLinkedService
type GoogleBigQueryLinkedServiceTypeProperties = original.GoogleBigQueryLinkedServiceTypeProperties
type GoogleBigQueryObjectDataset = original.GoogleBigQueryObjectDataset
type GoogleBigQuerySource = original.GoogleBigQuerySource
+type GreenplumDatasetTypeProperties = original.GreenplumDatasetTypeProperties
type GreenplumLinkedService = original.GreenplumLinkedService
type GreenplumLinkedServiceTypeProperties = original.GreenplumLinkedServiceTypeProperties
type GreenplumSource = original.GreenplumSource
@@ -1402,6 +1472,7 @@ type HdfsLinkedServiceTypeProperties = original.HdfsLinkedServiceTypeProperties
type HdfsLocation = original.HdfsLocation
type HdfsReadSettings = original.HdfsReadSettings
type HdfsSource = original.HdfsSource
+type HiveDatasetTypeProperties = original.HiveDatasetTypeProperties
type HiveLinkedService = original.HiveLinkedService
type HiveLinkedServiceTypeProperties = original.HiveLinkedServiceTypeProperties
type HiveObjectDataset = original.HiveObjectDataset
@@ -1412,6 +1483,7 @@ type HubspotObjectDataset = original.HubspotObjectDataset
type HubspotSource = original.HubspotSource
type IfConditionActivity = original.IfConditionActivity
type IfConditionActivityTypeProperties = original.IfConditionActivityTypeProperties
+type ImpalaDatasetTypeProperties = original.ImpalaDatasetTypeProperties
type ImpalaLinkedService = original.ImpalaLinkedService
type ImpalaLinkedServiceTypeProperties = original.ImpalaLinkedServiceTypeProperties
type ImpalaObjectDataset = original.ImpalaObjectDataset
@@ -1449,7 +1521,12 @@ type IntegrationRuntimeVNetProperties = original.IntegrationRuntimeVNetPropertie
type IntegrationRuntimesClient = original.IntegrationRuntimesClient
type IntegrationRuntimesStartFuture = original.IntegrationRuntimesStartFuture
type IntegrationRuntimesStopFuture = original.IntegrationRuntimesStopFuture
+type JSONDataset = original.JSONDataset
+type JSONDatasetTypeProperties = original.JSONDatasetTypeProperties
type JSONFormat = original.JSONFormat
+type JSONSink = original.JSONSink
+type JSONSource = original.JSONSource
+type JSONWriteSettings = original.JSONWriteSettings
type JiraLinkedService = original.JiraLinkedService
type JiraLinkedServiceTypeProperties = original.JiraLinkedServiceTypeProperties
type JiraObjectDataset = original.JiraObjectDataset
@@ -1516,6 +1593,7 @@ type NetezzaLinkedServiceTypeProperties = original.NetezzaLinkedServiceTypePrope
type NetezzaPartitionSettings = original.NetezzaPartitionSettings
type NetezzaSource = original.NetezzaSource
type NetezzaTableDataset = original.NetezzaTableDataset
+type NetezzaTableDatasetTypeProperties = original.NetezzaTableDatasetTypeProperties
type ODataLinkedService = original.ODataLinkedService
type ODataLinkedServiceTypeProperties = original.ODataLinkedServiceTypeProperties
type ODataResourceDataset = original.ODataResourceDataset
@@ -1566,6 +1644,7 @@ type PaypalLinkedService = original.PaypalLinkedService
type PaypalLinkedServiceTypeProperties = original.PaypalLinkedServiceTypeProperties
type PaypalObjectDataset = original.PaypalObjectDataset
type PaypalSource = original.PaypalSource
+type PhoenixDatasetTypeProperties = original.PhoenixDatasetTypeProperties
type PhoenixLinkedService = original.PhoenixLinkedService
type PhoenixLinkedServiceTypeProperties = original.PhoenixLinkedServiceTypeProperties
type PhoenixObjectDataset = original.PhoenixObjectDataset
@@ -1588,6 +1667,7 @@ type PostgreSQLLinkedServiceTypeProperties = original.PostgreSQLLinkedServiceTyp
type PostgreSQLSource = original.PostgreSQLSource
type PostgreSQLTableDataset = original.PostgreSQLTableDataset
type PostgreSQLTableDatasetTypeProperties = original.PostgreSQLTableDatasetTypeProperties
+type PrestoDatasetTypeProperties = original.PrestoDatasetTypeProperties
type PrestoLinkedService = original.PrestoLinkedService
type PrestoLinkedServiceTypeProperties = original.PrestoLinkedServiceTypeProperties
type PrestoObjectDataset = original.PrestoObjectDataset
@@ -1668,6 +1748,8 @@ type SalesforceSink = original.SalesforceSink
type SalesforceSource = original.SalesforceSource
type SapBWLinkedService = original.SapBWLinkedService
type SapBWLinkedServiceTypeProperties = original.SapBWLinkedServiceTypeProperties
+type SapBwCubeDataset = original.SapBwCubeDataset
+type SapBwSource = original.SapBwSource
type SapCloudForCustomerLinkedService = original.SapCloudForCustomerLinkedService
type SapCloudForCustomerLinkedServiceTypeProperties = original.SapCloudForCustomerLinkedServiceTypeProperties
type SapCloudForCustomerResourceDataset = original.SapCloudForCustomerResourceDataset
@@ -1721,6 +1803,7 @@ type ShopifyLinkedService = original.ShopifyLinkedService
type ShopifyLinkedServiceTypeProperties = original.ShopifyLinkedServiceTypeProperties
type ShopifyObjectDataset = original.ShopifyObjectDataset
type ShopifySource = original.ShopifySource
+type SparkDatasetTypeProperties = original.SparkDatasetTypeProperties
type SparkLinkedService = original.SparkLinkedService
type SparkLinkedServiceTypeProperties = original.SparkLinkedServiceTypeProperties
type SparkObjectDataset = original.SparkObjectDataset
@@ -1747,6 +1830,8 @@ type SubResource = original.SubResource
type SybaseLinkedService = original.SybaseLinkedService
type SybaseLinkedServiceTypeProperties = original.SybaseLinkedServiceTypeProperties
type SybaseSource = original.SybaseSource
+type SybaseTableDataset = original.SybaseTableDataset
+type SybaseTableDatasetTypeProperties = original.SybaseTableDatasetTypeProperties
type TeradataLinkedService = original.TeradataLinkedService
type TeradataLinkedServiceTypeProperties = original.TeradataLinkedServiceTypeProperties
type TeradataPartitionSettings = original.TeradataPartitionSettings
@@ -1765,9 +1850,12 @@ type TriggerResource = original.TriggerResource
type TriggerRun = original.TriggerRun
type TriggerRunsClient = original.TriggerRunsClient
type TriggerRunsQueryResponse = original.TriggerRunsQueryResponse
+type TriggerSubscriptionOperationStatus = original.TriggerSubscriptionOperationStatus
type TriggersClient = original.TriggersClient
type TriggersStartFuture = original.TriggersStartFuture
type TriggersStopFuture = original.TriggersStopFuture
+type TriggersSubscribeToEventsFuture = original.TriggersSubscribeToEventsFuture
+type TriggersUnsubscribeFromEventsFuture = original.TriggersUnsubscribeFromEventsFuture
type TumblingWindowTrigger = original.TumblingWindowTrigger
type TumblingWindowTriggerDependencyReference = original.TumblingWindowTriggerDependencyReference
type TumblingWindowTriggerTypeProperties = original.TumblingWindowTriggerTypeProperties
@@ -1780,6 +1868,7 @@ type UserProperty = original.UserProperty
type ValidationActivity = original.ValidationActivity
type ValidationActivityTypeProperties = original.ValidationActivityTypeProperties
type VariableSpecification = original.VariableSpecification
+type VerticaDatasetTypeProperties = original.VerticaDatasetTypeProperties
type VerticaLinkedService = original.VerticaLinkedService
type VerticaLinkedServiceTypeProperties = original.VerticaLinkedServiceTypeProperties
type VerticaSource = original.VerticaSource
@@ -1952,6 +2041,9 @@ func PossibleAuthenticationTypeValues() []AuthenticationType {
func PossibleAuthorizationTypeValues() []AuthorizationType {
return original.PossibleAuthorizationTypeValues()
}
+func PossibleAvroCompressionCodecValues() []AvroCompressionCodec {
+ return original.PossibleAvroCompressionCodecValues()
+}
func PossibleAzureFunctionActivityMethodValues() []AzureFunctionActivityMethod {
return original.PossibleAzureFunctionActivityMethodValues()
}
@@ -1982,6 +2074,9 @@ func PossibleDynamicsAuthenticationTypeValues() []DynamicsAuthenticationType {
func PossibleDynamicsDeploymentTypeValues() []DynamicsDeploymentType {
return original.PossibleDynamicsDeploymentTypeValues()
}
+func PossibleEventSubscriptionStatusValues() []EventSubscriptionStatus {
+ return original.PossibleEventSubscriptionStatusValues()
+}
func PossibleFtpAuthenticationTypeValues() []FtpAuthenticationType {
return original.PossibleFtpAuthenticationTypeValues()
}
@@ -2042,6 +2137,9 @@ func PossibleIntegrationRuntimeTypeValues() []IntegrationRuntimeType {
func PossibleIntegrationRuntimeUpdateResultValues() []IntegrationRuntimeUpdateResult {
return original.PossibleIntegrationRuntimeUpdateResultValues()
}
+func PossibleJSONWriteFilePatternValues() []JSONWriteFilePattern {
+ return original.PossibleJSONWriteFilePatternValues()
+}
func PossibleManagedIntegrationRuntimeNodeStatusValues() []ManagedIntegrationRuntimeNodeStatus {
return original.PossibleManagedIntegrationRuntimeNodeStatusValues()
}
@@ -2186,6 +2284,9 @@ func PossibleTypeBasicLinkedServiceValues() []TypeBasicLinkedService {
func PossibleTypeBasicSsisObjectMetadataValues() []TypeBasicSsisObjectMetadata {
return original.PossibleTypeBasicSsisObjectMetadataValues()
}
+func PossibleTypeBasicStoreWriteSettingsValues() []TypeBasicStoreWriteSettings {
+ return original.PossibleTypeBasicStoreWriteSettingsValues()
+}
func PossibleTypeBasicTriggerValues() []TypeBasicTrigger {
return original.PossibleTypeBasicTriggerValues()
}
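
Editor's note: the hunk above adds several `Possible*Values` helpers (Avro compression codecs, event-subscription statuses, JSON write file patterns) to this profile. A minimal sketch of calling them, assuming the hunk belongs to the `profiles/latest/datafactory/mgmt/datafactory` package (the file header for this hunk sits above this excerpt, so the import path is inferred from the type names):

```go
package main

import (
	"fmt"

	// Assumed import path; inferred from the datafactory types in the hunk above.
	datafactory "github.com/Azure/azure-sdk-for-go/profiles/latest/datafactory/mgmt/datafactory"
)

func main() {
	// Enum helpers newly surfaced by this profile regeneration.
	fmt.Println(datafactory.PossibleAvroCompressionCodecValues())
	fmt.Println(datafactory.PossibleEventSubscriptionStatusValues())
	fmt.Println(datafactory.PossibleJSONWriteFilePatternValues())
}
```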
diff --git a/profiles/latest/datamigration/mgmt/datamigration/models.go b/profiles/latest/datamigration/mgmt/datamigration/models.go
index d36b80f854bb..4827fc7d1ca8 100644
--- a/profiles/latest/datamigration/mgmt/datamigration/models.go
+++ b/profiles/latest/datamigration/mgmt/datamigration/models.go
@@ -83,9 +83,9 @@ const (
type CommandType = original.CommandType
const (
+ CommandTypeCommandProperties CommandType = original.CommandTypeCommandProperties
CommandTypeMigrateSQLServerAzureDbSQLMiComplete CommandType = original.CommandTypeMigrateSQLServerAzureDbSQLMiComplete
CommandTypeMigrateSyncCompleteDatabase CommandType = original.CommandTypeMigrateSyncCompleteDatabase
- CommandTypeUnknown CommandType = original.CommandTypeUnknown
)
type DatabaseCompatLevel = original.DatabaseCompatLevel
@@ -486,7 +486,7 @@ const (
TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS TaskType = original.TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS
TaskTypeMigrateSQLServerAzureSQLDbSync TaskType = original.TaskTypeMigrateSQLServerAzureSQLDbSync
TaskTypeMigrateSQLServerSQLDb TaskType = original.TaskTypeMigrateSQLServerSQLDb
- TaskTypeUnknown TaskType = original.TaskTypeUnknown
+ TaskTypeProjectTaskProperties TaskType = original.TaskTypeProjectTaskProperties
TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI TaskType = original.TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI
TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS TaskType = original.TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS
TaskTypeValidateMigrationInputSQLServerSQLDbSync TaskType = original.TaskTypeValidateMigrationInputSQLServerSQLDbSync
@@ -495,11 +495,11 @@ const (
type Type = original.Type
const (
+ TypeConnectionInfo Type = original.TypeConnectionInfo
TypeMiSQLConnectionInfo Type = original.TypeMiSQLConnectionInfo
TypeMySQLConnectionInfo Type = original.TypeMySQLConnectionInfo
TypePostgreSQLConnectionInfo Type = original.TypePostgreSQLConnectionInfo
TypeSQLConnectionInfo Type = original.TypeSQLConnectionInfo
- TypeUnknown Type = original.TypeUnknown
)
type UpdateActionType = original.UpdateActionType
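
Editor's note: the datamigration hunk above replaces the `*Unknown` discriminator constants with base-type names, which is a breaking change for callers that referenced them. A minimal migration sketch, using only the constants shown in the hunk:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/latest/datamigration/mgmt/datamigration"
)

func main() {
	// Former *Unknown constants now carry the base discriminator names.
	var c datamigration.CommandType = datamigration.CommandTypeCommandProperties // was CommandTypeUnknown
	var t datamigration.TaskType = datamigration.TaskTypeProjectTaskProperties   // was TaskTypeUnknown
	var ty datamigration.Type = datamigration.TypeConnectionInfo                 // was TypeUnknown
	fmt.Println(c, t, ty)
}
```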
diff --git a/profiles/latest/generate.go b/profiles/latest/generate.go
index 4a8f0709cd96..cae43ff4afe4 100644
--- a/profiles/latest/generate.go
+++ b/profiles/latest/generate.go
@@ -14,4 +14,4 @@
package latest
-//go:generate go run ../../tools/profileBuilder/main.go latest --clear-output --name latest --root ../../services --output-location ./ --verbose
+//go:generate go run ../../tools/profileBuilder/main.go latest --clear-output --name latest --root ../../services --output-location ./
diff --git a/profiles/latest/logic/mgmt/logic/logicapi/models.go b/profiles/latest/logic/mgmt/logic/logicapi/models.go
index fd8780499286..d311caa49361 100644
--- a/profiles/latest/logic/mgmt/logic/logicapi/models.go
+++ b/profiles/latest/logic/mgmt/logic/logicapi/models.go
@@ -29,6 +29,7 @@ type IntegrationAccountBatchConfigurationsClientAPI = original.IntegrationAccoun
type IntegrationAccountsClientAPI = original.IntegrationAccountsClientAPI
type MapsClientAPI = original.MapsClientAPI
type PartnersClientAPI = original.PartnersClientAPI
+type RosettaNetProcessConfigurationsClientAPI = original.RosettaNetProcessConfigurationsClientAPI
type SchemasClientAPI = original.SchemasClientAPI
type SessionsClientAPI = original.SessionsClientAPI
type WorkflowRunActionRepetitionsClientAPI = original.WorkflowRunActionRepetitionsClientAPI
diff --git a/profiles/latest/logic/mgmt/logic/models.go b/profiles/latest/logic/mgmt/logic/models.go
index 7acb3beca961..e56cc580326c 100644
--- a/profiles/latest/logic/mgmt/logic/models.go
+++ b/profiles/latest/logic/mgmt/logic/models.go
@@ -156,6 +156,15 @@ const (
MapTypeXslt MapType = original.MapTypeXslt
)
+type MessageEncoding = original.MessageEncoding
+
+const (
+ MessageEncodingBase64 MessageEncoding = original.MessageEncodingBase64
+ MessageEncodingEightBit MessageEncoding = original.MessageEncodingEightBit
+ MessageEncodingNotSpecified MessageEncoding = original.MessageEncodingNotSpecified
+ MessageEncodingQuotedPrintable MessageEncoding = original.MessageEncodingQuotedPrintable
+)
+
type MessageFilterType = original.MessageFilterType
const (
@@ -198,6 +207,52 @@ const (
RecurrenceFrequencyYear RecurrenceFrequency = original.RecurrenceFrequencyYear
)
+type RosettaNetActionType = original.RosettaNetActionType
+
+const (
+ RosettaNetActionTypeDoubleAction RosettaNetActionType = original.RosettaNetActionTypeDoubleAction
+ RosettaNetActionTypeNotSpecified RosettaNetActionType = original.RosettaNetActionTypeNotSpecified
+ RosettaNetActionTypeSingleAction RosettaNetActionType = original.RosettaNetActionTypeSingleAction
+)
+
+type RosettaNetPipActivityType = original.RosettaNetPipActivityType
+
+const (
+ RosettaNetPipActivityTypeBusinessTransaction RosettaNetPipActivityType = original.RosettaNetPipActivityTypeBusinessTransaction
+ RosettaNetPipActivityTypeInformationDistribution RosettaNetPipActivityType = original.RosettaNetPipActivityTypeInformationDistribution
+ RosettaNetPipActivityTypeNotification RosettaNetPipActivityType = original.RosettaNetPipActivityTypeNotification
+ RosettaNetPipActivityTypeNotSpecified RosettaNetPipActivityType = original.RosettaNetPipActivityTypeNotSpecified
+ RosettaNetPipActivityTypeQueryResponse RosettaNetPipActivityType = original.RosettaNetPipActivityTypeQueryResponse
+ RosettaNetPipActivityTypeRequestConfirm RosettaNetPipActivityType = original.RosettaNetPipActivityTypeRequestConfirm
+ RosettaNetPipActivityTypeRequestResponse RosettaNetPipActivityType = original.RosettaNetPipActivityTypeRequestResponse
+)
+
+type RosettaNetPipConfidentialityScope = original.RosettaNetPipConfidentialityScope
+
+const (
+ RosettaNetPipConfidentialityScopeNone RosettaNetPipConfidentialityScope = original.RosettaNetPipConfidentialityScopeNone
+ RosettaNetPipConfidentialityScopeNotSpecified RosettaNetPipConfidentialityScope = original.RosettaNetPipConfidentialityScopeNotSpecified
+ RosettaNetPipConfidentialityScopePayload RosettaNetPipConfidentialityScope = original.RosettaNetPipConfidentialityScopePayload
+ RosettaNetPipConfidentialityScopePayloadContainer RosettaNetPipConfidentialityScope = original.RosettaNetPipConfidentialityScopePayloadContainer
+)
+
+type RosettaNetPipRoleType = original.RosettaNetPipRoleType
+
+const (
+ RosettaNetPipRoleTypeEmployee RosettaNetPipRoleType = original.RosettaNetPipRoleTypeEmployee
+ RosettaNetPipRoleTypeFunctional RosettaNetPipRoleType = original.RosettaNetPipRoleTypeFunctional
+ RosettaNetPipRoleTypeNotSpecified RosettaNetPipRoleType = original.RosettaNetPipRoleTypeNotSpecified
+ RosettaNetPipRoleTypeOrganizational RosettaNetPipRoleType = original.RosettaNetPipRoleTypeOrganizational
+)
+
+type RosettaNetResponseType = original.RosettaNetResponseType
+
+const (
+ RosettaNetResponseTypeAsync RosettaNetResponseType = original.RosettaNetResponseTypeAsync
+ RosettaNetResponseTypeNotSpecified RosettaNetResponseType = original.RosettaNetResponseTypeNotSpecified
+ RosettaNetResponseTypeSync RosettaNetResponseType = original.RosettaNetResponseTypeSync
+)
+
type SchemaType = original.SchemaType
const (
@@ -468,6 +523,11 @@ type IntegrationAccountPartnerListResult = original.IntegrationAccountPartnerLis
type IntegrationAccountPartnerListResultIterator = original.IntegrationAccountPartnerListResultIterator
type IntegrationAccountPartnerListResultPage = original.IntegrationAccountPartnerListResultPage
type IntegrationAccountPartnerProperties = original.IntegrationAccountPartnerProperties
+type IntegrationAccountRosettaNetProcessConfiguration = original.IntegrationAccountRosettaNetProcessConfiguration
+type IntegrationAccountRosettaNetProcessConfigurationListResult = original.IntegrationAccountRosettaNetProcessConfigurationListResult
+type IntegrationAccountRosettaNetProcessConfigurationListResultIterator = original.IntegrationAccountRosettaNetProcessConfigurationListResultIterator
+type IntegrationAccountRosettaNetProcessConfigurationListResultPage = original.IntegrationAccountRosettaNetProcessConfigurationListResultPage
+type IntegrationAccountRosettaNetProcessConfigurationProperties = original.IntegrationAccountRosettaNetProcessConfigurationProperties
type IntegrationAccountSchema = original.IntegrationAccountSchema
type IntegrationAccountSchemaFilter = original.IntegrationAccountSchemaFilter
type IntegrationAccountSchemaListResult = original.IntegrationAccountSchemaListResult
@@ -514,6 +574,12 @@ type Resource = original.Resource
type ResourceReference = original.ResourceReference
type Response = original.Response
type RetryHistory = original.RetryHistory
+type RosettaNetPipAcknowledgmentOfReceiptSettings = original.RosettaNetPipAcknowledgmentOfReceiptSettings
+type RosettaNetPipActivityBehavior = original.RosettaNetPipActivityBehavior
+type RosettaNetPipActivitySettings = original.RosettaNetPipActivitySettings
+type RosettaNetPipBusinessDocument = original.RosettaNetPipBusinessDocument
+type RosettaNetPipRoleSettings = original.RosettaNetPipRoleSettings
+type RosettaNetProcessConfigurationsClient = original.RosettaNetProcessConfigurationsClient
type RunActionCorrelation = original.RunActionCorrelation
type RunCorrelation = original.RunCorrelation
type SchemasClient = original.SchemasClient
@@ -653,6 +719,12 @@ func NewIntegrationAccountPartnerListResultIterator(page IntegrationAccountPartn
func NewIntegrationAccountPartnerListResultPage(getNextPage func(context.Context, IntegrationAccountPartnerListResult) (IntegrationAccountPartnerListResult, error)) IntegrationAccountPartnerListResultPage {
return original.NewIntegrationAccountPartnerListResultPage(getNextPage)
}
+func NewIntegrationAccountRosettaNetProcessConfigurationListResultIterator(page IntegrationAccountRosettaNetProcessConfigurationListResultPage) IntegrationAccountRosettaNetProcessConfigurationListResultIterator {
+ return original.NewIntegrationAccountRosettaNetProcessConfigurationListResultIterator(page)
+}
+func NewIntegrationAccountRosettaNetProcessConfigurationListResultPage(getNextPage func(context.Context, IntegrationAccountRosettaNetProcessConfigurationListResult) (IntegrationAccountRosettaNetProcessConfigurationListResult, error)) IntegrationAccountRosettaNetProcessConfigurationListResultPage {
+ return original.NewIntegrationAccountRosettaNetProcessConfigurationListResultPage(getNextPage)
+}
func NewIntegrationAccountSchemaListResultIterator(page IntegrationAccountSchemaListResultPage) IntegrationAccountSchemaListResultIterator {
return original.NewIntegrationAccountSchemaListResultIterator(page)
}
@@ -695,6 +767,12 @@ func NewRequestHistoryListResultIterator(page RequestHistoryListResultPage) Requ
func NewRequestHistoryListResultPage(getNextPage func(context.Context, RequestHistoryListResult) (RequestHistoryListResult, error)) RequestHistoryListResultPage {
return original.NewRequestHistoryListResultPage(getNextPage)
}
+func NewRosettaNetProcessConfigurationsClient(subscriptionID string) RosettaNetProcessConfigurationsClient {
+ return original.NewRosettaNetProcessConfigurationsClient(subscriptionID)
+}
+func NewRosettaNetProcessConfigurationsClientWithBaseURI(baseURI string, subscriptionID string) RosettaNetProcessConfigurationsClient {
+ return original.NewRosettaNetProcessConfigurationsClientWithBaseURI(baseURI, subscriptionID)
+}
func NewSchemasClient(subscriptionID string) SchemasClient {
return original.NewSchemasClient(subscriptionID)
}
@@ -848,6 +926,9 @@ func PossibleKeyTypeValues() []KeyType {
func PossibleMapTypeValues() []MapType {
return original.PossibleMapTypeValues()
}
+func PossibleMessageEncodingValues() []MessageEncoding {
+ return original.PossibleMessageEncodingValues()
+}
func PossibleMessageFilterTypeValues() []MessageFilterType {
return original.PossibleMessageFilterTypeValues()
}
@@ -860,6 +941,21 @@ func PossiblePartnerTypeValues() []PartnerType {
func PossibleRecurrenceFrequencyValues() []RecurrenceFrequency {
return original.PossibleRecurrenceFrequencyValues()
}
+func PossibleRosettaNetActionTypeValues() []RosettaNetActionType {
+ return original.PossibleRosettaNetActionTypeValues()
+}
+func PossibleRosettaNetPipActivityTypeValues() []RosettaNetPipActivityType {
+ return original.PossibleRosettaNetPipActivityTypeValues()
+}
+func PossibleRosettaNetPipConfidentialityScopeValues() []RosettaNetPipConfidentialityScope {
+ return original.PossibleRosettaNetPipConfidentialityScopeValues()
+}
+func PossibleRosettaNetPipRoleTypeValues() []RosettaNetPipRoleType {
+ return original.PossibleRosettaNetPipRoleTypeValues()
+}
+func PossibleRosettaNetResponseTypeValues() []RosettaNetResponseType {
+ return original.PossibleRosettaNetResponseTypeValues()
+}
func PossibleSchemaTypeValues() []SchemaType {
return original.PossibleSchemaTypeValues()
}
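
Editor's note: the logic profile now exposes the RosettaNet process-configuration client and its enums. A minimal sketch constructing the new client and listing two of the new enum helpers; the subscription ID below is a placeholder, not a real value:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/latest/logic/mgmt/logic"
)

func main() {
	// Placeholder subscription ID, for illustration only.
	client := logic.NewRosettaNetProcessConfigurationsClient("00000000-0000-0000-0000-000000000000")
	fmt.Printf("constructed %T\n", client)

	// Enum helpers added alongside the RosettaNet surface.
	fmt.Println(logic.PossibleRosettaNetActionTypeValues())
	fmt.Println(logic.PossibleMessageEncodingValues())
}
```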
diff --git a/profiles/latest/machinelearningservices/mgmt/machinelearningservices/machinelearningservicesapi/models.go b/profiles/latest/machinelearningservices/mgmt/machinelearningservices/machinelearningservicesapi/models.go
index ec13e27528ef..0d80dd1aa71c 100644
--- a/profiles/latest/machinelearningservices/mgmt/machinelearningservices/machinelearningservicesapi/models.go
+++ b/profiles/latest/machinelearningservices/mgmt/machinelearningservices/machinelearningservicesapi/models.go
@@ -19,7 +19,7 @@
package machinelearningservicesapi
-import original "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-05-01/machinelearningservices/machinelearningservicesapi"
+import original "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/machinelearningservicesapi"
type MachineLearningComputeClientAPI = original.MachineLearningComputeClientAPI
type OperationsClientAPI = original.OperationsClientAPI
diff --git a/profiles/latest/machinelearningservices/mgmt/machinelearningservices/models.go b/profiles/latest/machinelearningservices/mgmt/machinelearningservices/models.go
index f36813f47d08..683b49dd75be 100644
--- a/profiles/latest/machinelearningservices/mgmt/machinelearningservices/models.go
+++ b/profiles/latest/machinelearningservices/mgmt/machinelearningservices/models.go
@@ -22,7 +22,7 @@ package machinelearningservices
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-05-01/machinelearningservices"
+ original "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices"
)
const (
@@ -77,6 +77,17 @@ const (
ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine ComputeTypeBasicComputeSecrets = original.ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine
)
+type NodeState = original.NodeState
+
+const (
+ Idle NodeState = original.Idle
+ Leaving NodeState = original.Leaving
+ Preempted NodeState = original.Preempted
+ Preparing NodeState = original.Preparing
+ Running NodeState = original.Running
+ Unusable NodeState = original.Unusable
+)
+
type ProvisioningState = original.ProvisioningState
const (
@@ -89,6 +100,14 @@ const (
Updating ProvisioningState = original.Updating
)
+type RemoteLoginPortPublicAccess = original.RemoteLoginPortPublicAccess
+
+const (
+ Disabled RemoteLoginPortPublicAccess = original.Disabled
+ Enabled RemoteLoginPortPublicAccess = original.Enabled
+ NotSpecified RemoteLoginPortPublicAccess = original.NotSpecified
+)
+
type ResourceIdentityType = original.ResourceIdentityType
const (
@@ -98,8 +117,8 @@ const (
type Status = original.Status
const (
- Disabled Status = original.Disabled
- Enabled Status = original.Enabled
+ StatusDisabled Status = original.StatusDisabled
+ StatusEnabled Status = original.StatusEnabled
)
type UnderlyingResourceAction = original.UnderlyingResourceAction
@@ -266,9 +285,15 @@ func PossibleComputeTypeBasicComputeValues() []ComputeTypeBasicCompute {
func PossibleComputeTypeValues() []ComputeType {
return original.PossibleComputeTypeValues()
}
+func PossibleNodeStateValues() []NodeState {
+ return original.PossibleNodeStateValues()
+}
func PossibleProvisioningStateValues() []ProvisioningState {
return original.PossibleProvisioningStateValues()
}
+func PossibleRemoteLoginPortPublicAccessValues() []RemoteLoginPortPublicAccess {
+ return original.PossibleRemoteLoginPortPublicAccessValues()
+}
func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
return original.PossibleResourceIdentityTypeValues()
}
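
Editor's note: in the machinelearningservices profile the bare `Disabled`/`Enabled` names now belong to `RemoteLoginPortPublicAccess`, so the `Status` constants gained a `Status` prefix. A minimal sketch of the renamed and newly added values:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/latest/machinelearningservices/mgmt/machinelearningservices"
)

func main() {
	// Status constants are now prefixed; the unprefixed names moved to RemoteLoginPortPublicAccess.
	var s machinelearningservices.Status = machinelearningservices.StatusEnabled // was Enabled
	var a machinelearningservices.RemoteLoginPortPublicAccess = machinelearningservices.Enabled
	fmt.Println(s, a, machinelearningservices.PossibleNodeStateValues())
}
```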
diff --git a/profiles/latest/network/mgmt/network/models.go b/profiles/latest/network/mgmt/network/models.go
index d82d840bee50..c2f072e3829c 100644
--- a/profiles/latest/network/mgmt/network/models.go
+++ b/profiles/latest/network/mgmt/network/models.go
@@ -422,6 +422,37 @@ const (
QinQ ExpressRoutePortsEncapsulation = original.QinQ
)
+type FirewallPolicyFilterRuleActionType = original.FirewallPolicyFilterRuleActionType
+
+const (
+ FirewallPolicyFilterRuleActionTypeAlert FirewallPolicyFilterRuleActionType = original.FirewallPolicyFilterRuleActionTypeAlert
+ FirewallPolicyFilterRuleActionTypeAllow FirewallPolicyFilterRuleActionType = original.FirewallPolicyFilterRuleActionTypeAllow
+ FirewallPolicyFilterRuleActionTypeDeny FirewallPolicyFilterRuleActionType = original.FirewallPolicyFilterRuleActionTypeDeny
+)
+
+type FirewallPolicyNatRuleActionType = original.FirewallPolicyNatRuleActionType
+
+const (
+ DNAT FirewallPolicyNatRuleActionType = original.DNAT
+ SNAT FirewallPolicyNatRuleActionType = original.SNAT
+)
+
+type FirewallPolicyRuleConditionApplicationProtocolType = original.FirewallPolicyRuleConditionApplicationProtocolType
+
+const (
+ FirewallPolicyRuleConditionApplicationProtocolTypeHTTP FirewallPolicyRuleConditionApplicationProtocolType = original.FirewallPolicyRuleConditionApplicationProtocolTypeHTTP
+ FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS FirewallPolicyRuleConditionApplicationProtocolType = original.FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS
+)
+
+type FirewallPolicyRuleConditionNetworkProtocol = original.FirewallPolicyRuleConditionNetworkProtocol
+
+const (
+ FirewallPolicyRuleConditionNetworkProtocolAny FirewallPolicyRuleConditionNetworkProtocol = original.FirewallPolicyRuleConditionNetworkProtocolAny
+ FirewallPolicyRuleConditionNetworkProtocolICMP FirewallPolicyRuleConditionNetworkProtocol = original.FirewallPolicyRuleConditionNetworkProtocolICMP
+ FirewallPolicyRuleConditionNetworkProtocolTCP FirewallPolicyRuleConditionNetworkProtocol = original.FirewallPolicyRuleConditionNetworkProtocolTCP
+ FirewallPolicyRuleConditionNetworkProtocolUDP FirewallPolicyRuleConditionNetworkProtocol = original.FirewallPolicyRuleConditionNetworkProtocolUDP
+)
+
type FlowLogFormatType = original.FlowLogFormatType
const (
@@ -698,6 +729,22 @@ const (
RouteNextHopTypeVnetLocal RouteNextHopType = original.RouteNextHopTypeVnetLocal
)
+type RuleConditionType = original.RuleConditionType
+
+const (
+ RuleConditionTypeApplicationRuleCondition RuleConditionType = original.RuleConditionTypeApplicationRuleCondition
+ RuleConditionTypeFirewallPolicyRuleCondition RuleConditionType = original.RuleConditionTypeFirewallPolicyRuleCondition
+ RuleConditionTypeNetworkRuleCondition RuleConditionType = original.RuleConditionTypeNetworkRuleCondition
+)
+
+type RuleType = original.RuleType
+
+const (
+ RuleTypeFirewallPolicyFilterRule RuleType = original.RuleTypeFirewallPolicyFilterRule
+ RuleTypeFirewallPolicyNatRule RuleType = original.RuleTypeFirewallPolicyNatRule
+ RuleTypeFirewallPolicyRule RuleType = original.RuleTypeFirewallPolicyRule
+)
+
type SecurityRuleAccess = original.SecurityRuleAccess
const (
@@ -1032,6 +1079,7 @@ type ApplicationGatewaysDeleteFuture = original.ApplicationGatewaysDeleteFuture
type ApplicationGatewaysStartFuture = original.ApplicationGatewaysStartFuture
type ApplicationGatewaysStopFuture = original.ApplicationGatewaysStopFuture
type ApplicationGatewaysUpdateTagsFuture = original.ApplicationGatewaysUpdateTagsFuture
+type ApplicationRuleCondition = original.ApplicationRuleCondition
type ApplicationSecurityGroup = original.ApplicationSecurityGroup
type ApplicationSecurityGroupListResult = original.ApplicationSecurityGroupListResult
type ApplicationSecurityGroupListResultIterator = original.ApplicationSecurityGroupListResultIterator
@@ -1092,6 +1140,7 @@ type AzureFirewallNetworkRule = original.AzureFirewallNetworkRule
type AzureFirewallNetworkRuleCollection = original.AzureFirewallNetworkRuleCollection
type AzureFirewallNetworkRuleCollectionPropertiesFormat = original.AzureFirewallNetworkRuleCollectionPropertiesFormat
type AzureFirewallPropertiesFormat = original.AzureFirewallPropertiesFormat
+type AzureFirewallPublicIPAddress = original.AzureFirewallPublicIPAddress
type AzureFirewallRCAction = original.AzureFirewallRCAction
type AzureFirewallsClient = original.AzureFirewallsClient
type AzureFirewallsCreateOrUpdateFuture = original.AzureFirewallsCreateOrUpdateFuture
@@ -1105,6 +1154,8 @@ type BGPCommunity = original.BGPCommunity
type BackendAddressPool = original.BackendAddressPool
type BackendAddressPoolPropertiesFormat = original.BackendAddressPoolPropertiesFormat
type BaseClient = original.BaseClient
+type BasicFirewallPolicyRule = original.BasicFirewallPolicyRule
+type BasicFirewallPolicyRuleCondition = original.BasicFirewallPolicyRuleCondition
type BastionHost = original.BastionHost
type BastionHostIPConfiguration = original.BastionHostIPConfiguration
type BastionHostIPConfigurationPropertiesFormat = original.BastionHostIPConfigurationPropertiesFormat
@@ -1306,6 +1357,29 @@ type ExpressRouteServiceProviderListResultIterator = original.ExpressRouteServic
type ExpressRouteServiceProviderListResultPage = original.ExpressRouteServiceProviderListResultPage
type ExpressRouteServiceProviderPropertiesFormat = original.ExpressRouteServiceProviderPropertiesFormat
type ExpressRouteServiceProvidersClient = original.ExpressRouteServiceProvidersClient
+type FirewallPoliciesClient = original.FirewallPoliciesClient
+type FirewallPoliciesCreateOrUpdateFuture = original.FirewallPoliciesCreateOrUpdateFuture
+type FirewallPoliciesDeleteFuture = original.FirewallPoliciesDeleteFuture
+type FirewallPolicy = original.FirewallPolicy
+type FirewallPolicyFilterRule = original.FirewallPolicyFilterRule
+type FirewallPolicyFilterRuleAction = original.FirewallPolicyFilterRuleAction
+type FirewallPolicyListResult = original.FirewallPolicyListResult
+type FirewallPolicyListResultIterator = original.FirewallPolicyListResultIterator
+type FirewallPolicyListResultPage = original.FirewallPolicyListResultPage
+type FirewallPolicyNatRule = original.FirewallPolicyNatRule
+type FirewallPolicyNatRuleAction = original.FirewallPolicyNatRuleAction
+type FirewallPolicyPropertiesFormat = original.FirewallPolicyPropertiesFormat
+type FirewallPolicyRule = original.FirewallPolicyRule
+type FirewallPolicyRuleCondition = original.FirewallPolicyRuleCondition
+type FirewallPolicyRuleConditionApplicationProtocol = original.FirewallPolicyRuleConditionApplicationProtocol
+type FirewallPolicyRuleGroup = original.FirewallPolicyRuleGroup
+type FirewallPolicyRuleGroupListResult = original.FirewallPolicyRuleGroupListResult
+type FirewallPolicyRuleGroupListResultIterator = original.FirewallPolicyRuleGroupListResultIterator
+type FirewallPolicyRuleGroupListResultPage = original.FirewallPolicyRuleGroupListResultPage
+type FirewallPolicyRuleGroupProperties = original.FirewallPolicyRuleGroupProperties
+type FirewallPolicyRuleGroupsClient = original.FirewallPolicyRuleGroupsClient
+type FirewallPolicyRuleGroupsCreateOrUpdateFuture = original.FirewallPolicyRuleGroupsCreateOrUpdateFuture
+type FirewallPolicyRuleGroupsDeleteFuture = original.FirewallPolicyRuleGroupsDeleteFuture
type FlowLogFormatParameters = original.FlowLogFormatParameters
type FlowLogInformation = original.FlowLogInformation
type FlowLogProperties = original.FlowLogProperties
@@ -1317,6 +1391,7 @@ type GatewayRouteListResult = original.GatewayRouteListResult
type GetVpnSitesConfigurationRequest = original.GetVpnSitesConfigurationRequest
type HTTPConfiguration = original.HTTPConfiguration
type HTTPHeader = original.HTTPHeader
+type HubIPAddresses = original.HubIPAddresses
type HubVirtualNetworkConnection = original.HubVirtualNetworkConnection
type HubVirtualNetworkConnectionProperties = original.HubVirtualNetworkConnectionProperties
type HubVirtualNetworkConnectionsClient = original.HubVirtualNetworkConnectionsClient
@@ -1620,6 +1695,7 @@ type RouteTablesUpdateTagsFuture = original.RouteTablesUpdateTagsFuture
type RoutesClient = original.RoutesClient
type RoutesCreateOrUpdateFuture = original.RoutesCreateOrUpdateFuture
type RoutesDeleteFuture = original.RoutesDeleteFuture
+type RuleCondition = original.RuleCondition
type SecurityGroup = original.SecurityGroup
type SecurityGroupListResult = original.SecurityGroupListResult
type SecurityGroupListResultIterator = original.SecurityGroupListResultIterator
@@ -1682,6 +1758,7 @@ type SubnetsClient = original.SubnetsClient
type SubnetsCreateOrUpdateFuture = original.SubnetsCreateOrUpdateFuture
type SubnetsDeleteFuture = original.SubnetsDeleteFuture
type SubnetsPrepareNetworkPoliciesFuture = original.SubnetsPrepareNetworkPoliciesFuture
+type SubnetsUnprepareNetworkPoliciesFuture = original.SubnetsUnprepareNetworkPoliciesFuture
type TagsObject = original.TagsObject
type Topology = original.Topology
type TopologyAssociation = original.TopologyAssociation
@@ -1695,6 +1772,7 @@ type TroubleshootingProperties = original.TroubleshootingProperties
type TroubleshootingRecommendedActions = original.TroubleshootingRecommendedActions
type TroubleshootingResult = original.TroubleshootingResult
type TunnelConnectionHealth = original.TunnelConnectionHealth
+type UnprepareNetworkPoliciesRequest = original.UnprepareNetworkPoliciesRequest
type Usage = original.Usage
type UsageName = original.UsageName
type UsagesClient = original.UsagesClient
@@ -2149,6 +2227,30 @@ func NewExpressRouteServiceProvidersClient(subscriptionID string) ExpressRouteSe
func NewExpressRouteServiceProvidersClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteServiceProvidersClient {
return original.NewExpressRouteServiceProvidersClientWithBaseURI(baseURI, subscriptionID)
}
+func NewFirewallPoliciesClient(subscriptionID string) FirewallPoliciesClient {
+ return original.NewFirewallPoliciesClient(subscriptionID)
+}
+func NewFirewallPoliciesClientWithBaseURI(baseURI string, subscriptionID string) FirewallPoliciesClient {
+ return original.NewFirewallPoliciesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewFirewallPolicyListResultIterator(page FirewallPolicyListResultPage) FirewallPolicyListResultIterator {
+ return original.NewFirewallPolicyListResultIterator(page)
+}
+func NewFirewallPolicyListResultPage(getNextPage func(context.Context, FirewallPolicyListResult) (FirewallPolicyListResult, error)) FirewallPolicyListResultPage {
+ return original.NewFirewallPolicyListResultPage(getNextPage)
+}
+func NewFirewallPolicyRuleGroupListResultIterator(page FirewallPolicyRuleGroupListResultPage) FirewallPolicyRuleGroupListResultIterator {
+ return original.NewFirewallPolicyRuleGroupListResultIterator(page)
+}
+func NewFirewallPolicyRuleGroupListResultPage(getNextPage func(context.Context, FirewallPolicyRuleGroupListResult) (FirewallPolicyRuleGroupListResult, error)) FirewallPolicyRuleGroupListResultPage {
+ return original.NewFirewallPolicyRuleGroupListResultPage(getNextPage)
+}
+func NewFirewallPolicyRuleGroupsClient(subscriptionID string) FirewallPolicyRuleGroupsClient {
+ return original.NewFirewallPolicyRuleGroupsClient(subscriptionID)
+}
+func NewFirewallPolicyRuleGroupsClientWithBaseURI(baseURI string, subscriptionID string) FirewallPolicyRuleGroupsClient {
+ return original.NewFirewallPolicyRuleGroupsClientWithBaseURI(baseURI, subscriptionID)
+}
func NewHubVirtualNetworkConnectionsClient(subscriptionID string) HubVirtualNetworkConnectionsClient {
return original.NewHubVirtualNetworkConnectionsClient(subscriptionID)
}
@@ -2902,6 +3004,18 @@ func PossibleExpressRoutePeeringTypeValues() []ExpressRoutePeeringType {
func PossibleExpressRoutePortsEncapsulationValues() []ExpressRoutePortsEncapsulation {
return original.PossibleExpressRoutePortsEncapsulationValues()
}
+func PossibleFirewallPolicyFilterRuleActionTypeValues() []FirewallPolicyFilterRuleActionType {
+ return original.PossibleFirewallPolicyFilterRuleActionTypeValues()
+}
+func PossibleFirewallPolicyNatRuleActionTypeValues() []FirewallPolicyNatRuleActionType {
+ return original.PossibleFirewallPolicyNatRuleActionTypeValues()
+}
+func PossibleFirewallPolicyRuleConditionApplicationProtocolTypeValues() []FirewallPolicyRuleConditionApplicationProtocolType {
+ return original.PossibleFirewallPolicyRuleConditionApplicationProtocolTypeValues()
+}
+func PossibleFirewallPolicyRuleConditionNetworkProtocolValues() []FirewallPolicyRuleConditionNetworkProtocol {
+ return original.PossibleFirewallPolicyRuleConditionNetworkProtocolValues()
+}
func PossibleFlowLogFormatTypeValues() []FlowLogFormatType {
return original.PossibleFlowLogFormatTypeValues()
}
@@ -2995,6 +3109,12 @@ func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
func PossibleRouteNextHopTypeValues() []RouteNextHopType {
return original.PossibleRouteNextHopTypeValues()
}
+func PossibleRuleConditionTypeValues() []RuleConditionType {
+ return original.PossibleRuleConditionTypeValues()
+}
+func PossibleRuleTypeValues() []RuleType {
+ return original.PossibleRuleTypeValues()
+}
func PossibleSecurityRuleAccessValues() []SecurityRuleAccess {
return original.PossibleSecurityRuleAccessValues()
}
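
Editor's note: the network profile picks up the firewall policy surface (clients, rule groups, rule-condition enums). A minimal sketch constructing the new clients and printing two of the new enum helpers; the subscription ID is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/latest/network/mgmt/network"
)

func main() {
	// Placeholder subscription ID, for illustration only.
	fpClient := network.NewFirewallPoliciesClient("00000000-0000-0000-0000-000000000000")
	rgClient := network.NewFirewallPolicyRuleGroupsClient("00000000-0000-0000-0000-000000000000")
	_, _ = fpClient, rgClient

	// Enum helpers added alongside the firewall policy types.
	fmt.Println(network.PossibleFirewallPolicyNatRuleActionTypeValues())
	fmt.Println(network.PossibleRuleConditionTypeValues())
}
```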
diff --git a/profiles/latest/network/mgmt/network/networkapi/models.go b/profiles/latest/network/mgmt/network/networkapi/models.go
index b6dc4a0e532b..13c640036a2a 100644
--- a/profiles/latest/network/mgmt/network/networkapi/models.go
+++ b/profiles/latest/network/mgmt/network/networkapi/models.go
@@ -48,6 +48,8 @@ type ExpressRouteLinksClientAPI = original.ExpressRouteLinksClientAPI
type ExpressRoutePortsClientAPI = original.ExpressRoutePortsClientAPI
type ExpressRoutePortsLocationsClientAPI = original.ExpressRoutePortsLocationsClientAPI
type ExpressRouteServiceProvidersClientAPI = original.ExpressRouteServiceProvidersClientAPI
+type FirewallPoliciesClientAPI = original.FirewallPoliciesClientAPI
+type FirewallPolicyRuleGroupsClientAPI = original.FirewallPolicyRuleGroupsClientAPI
type HubVirtualNetworkConnectionsClientAPI = original.HubVirtualNetworkConnectionsClientAPI
type InboundNatRulesClientAPI = original.InboundNatRulesClientAPI
type InterfaceIPConfigurationsClientAPI = original.InterfaceIPConfigurationsClientAPI
diff --git a/profiles/latest/resources/mgmt/policy/models.go b/profiles/latest/resources/mgmt/policy/models.go
index 7b2a06150421..a192ab1aeeee 100644
--- a/profiles/latest/resources/mgmt/policy/models.go
+++ b/profiles/latest/resources/mgmt/policy/models.go
@@ -22,13 +22,20 @@ package policy
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-01-01/policy"
+ original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
)
const (
DefaultBaseURI = original.DefaultBaseURI
)
+type EnforcementMode = original.EnforcementMode
+
+const (
+ Default EnforcementMode = original.Default
+ DoNotEnforce EnforcementMode = original.DoNotEnforce
+)
+
type ResourceIdentityType = original.ResourceIdentityType
const (
@@ -110,6 +117,9 @@ func NewSetDefinitionsClientWithBaseURI(baseURI string, subscriptionID string) S
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
return original.NewWithBaseURI(baseURI, subscriptionID)
}
+func PossibleEnforcementModeValues() []EnforcementMode {
+ return original.PossibleEnforcementModeValues()
+}
func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
return original.PossibleResourceIdentityTypeValues()
}
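
Editor's note: the policy profile now tracks the 2019-06-01 API, which introduces `EnforcementMode`. A minimal sketch of the new enum:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/policy"
)

func main() {
	// EnforcementMode is new in the 2019-06-01 policy API.
	var m policy.EnforcementMode = policy.DoNotEnforce
	fmt.Println(m, policy.PossibleEnforcementModeValues())
}
```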
diff --git a/profiles/latest/resources/mgmt/policy/policyapi/models.go b/profiles/latest/resources/mgmt/policy/policyapi/models.go
index a4c9803aa76a..be7cec1ae017 100644
--- a/profiles/latest/resources/mgmt/policy/policyapi/models.go
+++ b/profiles/latest/resources/mgmt/policy/policyapi/models.go
@@ -19,7 +19,7 @@
package policyapi
-import original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-01-01/policy/policyapi"
+import original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy/policyapi"
type AssignmentsClientAPI = original.AssignmentsClientAPI
type DefinitionsClientAPI = original.DefinitionsClientAPI
diff --git a/profiles/latest/resources/mgmt/resources/models.go b/profiles/latest/resources/mgmt/resources/models.go
index 102c462c41fa..75481a5726dc 100644
--- a/profiles/latest/resources/mgmt/resources/models.go
+++ b/profiles/latest/resources/mgmt/resources/models.go
@@ -57,6 +57,7 @@ type AliasType = original.AliasType
type BaseClient = original.BaseClient
type BasicDependency = original.BasicDependency
type Client = original.Client
+type CloudError = original.CloudError
type CreateOrUpdateByIDFuture = original.CreateOrUpdateByIDFuture
type CreateOrUpdateFuture = original.CreateOrUpdateFuture
type DebugSetting = original.DebugSetting
@@ -86,6 +87,8 @@ type DeploymentsCreateOrUpdateFuture = original.DeploymentsCreateOrUpdateFuture
type DeploymentsDeleteAtManagementGroupScopeFuture = original.DeploymentsDeleteAtManagementGroupScopeFuture
type DeploymentsDeleteAtSubscriptionScopeFuture = original.DeploymentsDeleteAtSubscriptionScopeFuture
type DeploymentsDeleteFuture = original.DeploymentsDeleteFuture
+type ErrorAdditionalInfo = original.ErrorAdditionalInfo
+type ErrorResponse = original.ErrorResponse
type ExportTemplateRequest = original.ExportTemplateRequest
type GenericResource = original.GenericResource
type GenericResourceFilter = original.GenericResourceFilter
@@ -136,6 +139,7 @@ type TagsListResult = original.TagsListResult
type TagsListResultIterator = original.TagsListResultIterator
type TagsListResultPage = original.TagsListResultPage
type TargetResource = original.TargetResource
+type TemplateHashResult = original.TemplateHashResult
type TemplateLink = original.TemplateLink
type UpdateByIDFuture = original.UpdateByIDFuture
type UpdateFuture = original.UpdateFuture
diff --git a/profiles/latest/servicefabric/mgmt/servicefabric/models.go b/profiles/latest/servicefabric/mgmt/servicefabric/models.go
index b836cce76496..0f8b50fa3cc8 100644
--- a/profiles/latest/servicefabric/mgmt/servicefabric/models.go
+++ b/profiles/latest/servicefabric/mgmt/servicefabric/models.go
@@ -36,6 +36,13 @@ const (
SharedProcess ArmServicePackageActivationMode = original.SharedProcess
)
+type ArmUpgradeFailureAction = original.ArmUpgradeFailureAction
+
+const (
+ Manual ArmUpgradeFailureAction = original.Manual
+ Rollback ArmUpgradeFailureAction = original.Rollback
+)
+
type ClusterState = original.ClusterState
const (
@@ -184,8 +191,8 @@ const (
type UpgradeMode = original.UpgradeMode
const (
- Automatic UpgradeMode = original.Automatic
- Manual UpgradeMode = original.Manual
+ UpgradeModeAutomatic UpgradeMode = original.UpgradeModeAutomatic
+ UpgradeModeManual UpgradeMode = original.UpgradeModeManual
)
type UpgradeMode1 = original.UpgradeMode1
@@ -236,16 +243,18 @@ type ApplicationTypeVersionResource = original.ApplicationTypeVersionResource
type ApplicationTypeVersionResourceList = original.ApplicationTypeVersionResourceList
type ApplicationTypeVersionResourceProperties = original.ApplicationTypeVersionResourceProperties
type ApplicationTypeVersionsClient = original.ApplicationTypeVersionsClient
-type ApplicationTypeVersionsCreateFuture = original.ApplicationTypeVersionsCreateFuture
+type ApplicationTypeVersionsCreateOrUpdateFuture = original.ApplicationTypeVersionsCreateOrUpdateFuture
type ApplicationTypeVersionsDeleteFuture = original.ApplicationTypeVersionsDeleteFuture
type ApplicationTypesClient = original.ApplicationTypesClient
type ApplicationTypesDeleteFuture = original.ApplicationTypesDeleteFuture
type ApplicationUpgradePolicy = original.ApplicationUpgradePolicy
type ApplicationsClient = original.ApplicationsClient
-type ApplicationsCreateFuture = original.ApplicationsCreateFuture
+type ApplicationsCreateOrUpdateFuture = original.ApplicationsCreateOrUpdateFuture
type ApplicationsDeleteFuture = original.ApplicationsDeleteFuture
type ApplicationsUpdateFuture = original.ApplicationsUpdateFuture
type ArmApplicationHealthPolicy = original.ArmApplicationHealthPolicy
+type ArmRollingUpgradeMonitoringPolicy = original.ArmRollingUpgradeMonitoringPolicy
+type ArmServiceTypeHealthPolicy = original.ArmServiceTypeHealthPolicy
type AvailableOperationDisplay = original.AvailableOperationDisplay
type AzureActiveDirectory = original.AzureActiveDirectory
type BaseClient = original.BaseClient
@@ -269,7 +278,7 @@ type ClusterUpgradePolicy = original.ClusterUpgradePolicy
type ClusterVersionDetails = original.ClusterVersionDetails
type ClusterVersionsClient = original.ClusterVersionsClient
type ClustersClient = original.ClustersClient
-type ClustersCreateFuture = original.ClustersCreateFuture
+type ClustersCreateOrUpdateFuture = original.ClustersCreateOrUpdateFuture
type ClustersUpdateFuture = original.ClustersUpdateFuture
type DiagnosticsStorageAccountConfig = original.DiagnosticsStorageAccountConfig
type EndpointRangeDescription = original.EndpointRangeDescription
@@ -285,7 +294,6 @@ type OperationsClient = original.OperationsClient
type PartitionSchemeDescription = original.PartitionSchemeDescription
type ProxyResource = original.ProxyResource
type Resource = original.Resource
-type RollingUpgradeMonitoringPolicy = original.RollingUpgradeMonitoringPolicy
type ServerCertificateCommonName = original.ServerCertificateCommonName
type ServerCertificateCommonNames = original.ServerCertificateCommonNames
type ServiceCorrelationDescription = original.ServiceCorrelationDescription
@@ -300,7 +308,7 @@ type ServiceResourceUpdateProperties = original.ServiceResourceUpdateProperties
type ServiceTypeDeltaHealthPolicy = original.ServiceTypeDeltaHealthPolicy
type ServiceTypeHealthPolicy = original.ServiceTypeHealthPolicy
type ServicesClient = original.ServicesClient
-type ServicesCreateFuture = original.ServicesCreateFuture
+type ServicesCreateOrUpdateFuture = original.ServicesCreateOrUpdateFuture
type ServicesDeleteFuture = original.ServicesDeleteFuture
type ServicesUpdateFuture = original.ServicesUpdateFuture
type SettingsParameterDescription = original.SettingsParameterDescription
@@ -369,6 +377,9 @@ func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
func PossibleArmServicePackageActivationModeValues() []ArmServicePackageActivationMode {
return original.PossibleArmServicePackageActivationModeValues()
}
+func PossibleArmUpgradeFailureActionValues() []ArmUpgradeFailureAction {
+ return original.PossibleArmUpgradeFailureActionValues()
+}
func PossibleClusterStateValues() []ClusterState {
return original.PossibleClusterStateValues()
}
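
Editor's note: in the servicefabric profile the `UpgradeMode` constants gained an `UpgradeMode` prefix because `Manual` now also names an `ArmUpgradeFailureAction` value, and the `*CreateFuture` types were renamed to `*CreateOrUpdateFuture`. A minimal sketch of the renamed constants:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/latest/servicefabric/mgmt/servicefabric"
)

func main() {
	// UpgradeMode values are now prefixed; Manual/Rollback belong to ArmUpgradeFailureAction.
	var u servicefabric.UpgradeMode = servicefabric.UpgradeModeAutomatic // was Automatic
	var f servicefabric.ArmUpgradeFailureAction = servicefabric.Rollback
	fmt.Println(u, f, servicefabric.PossibleArmUpgradeFailureActionValues())
}
```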
diff --git a/profiles/latest/storage/mgmt/storage/models.go b/profiles/latest/storage/mgmt/storage/models.go
index b97e84109424..0c1eee97ccf2 100644
--- a/profiles/latest/storage/mgmt/storage/models.go
+++ b/profiles/latest/storage/mgmt/storage/models.go
@@ -296,6 +296,7 @@ type AzureFilesIdentityBasedAuthentication = original.AzureFilesIdentityBasedAut
type BaseClient = original.BaseClient
type BlobContainer = original.BlobContainer
type BlobContainersClient = original.BlobContainersClient
+type BlobServiceItems = original.BlobServiceItems
type BlobServiceProperties = original.BlobServiceProperties
type BlobServicePropertiesProperties = original.BlobServicePropertiesProperties
type BlobServicesClient = original.BlobServicesClient
@@ -312,6 +313,19 @@ type Encryption = original.Encryption
type EncryptionService = original.EncryptionService
type EncryptionServices = original.EncryptionServices
type Endpoints = original.Endpoints
+type ErrorResponse = original.ErrorResponse
+type ErrorResponseBody = original.ErrorResponseBody
+type FileServiceItems = original.FileServiceItems
+type FileServiceProperties = original.FileServiceProperties
+type FileServicePropertiesProperties = original.FileServicePropertiesProperties
+type FileServicesClient = original.FileServicesClient
+type FileShare = original.FileShare
+type FileShareItem = original.FileShareItem
+type FileShareItems = original.FileShareItems
+type FileShareItemsIterator = original.FileShareItemsIterator
+type FileShareItemsPage = original.FileShareItemsPage
+type FileShareProperties = original.FileShareProperties
+type FileSharesClient = original.FileSharesClient
type GeoReplicationStats = original.GeoReplicationStats
type IPRule = original.IPRule
type Identity = original.Identity
@@ -385,6 +399,24 @@ func NewBlobServicesClient(subscriptionID string) BlobServicesClient {
func NewBlobServicesClientWithBaseURI(baseURI string, subscriptionID string) BlobServicesClient {
return original.NewBlobServicesClientWithBaseURI(baseURI, subscriptionID)
}
+func NewFileServicesClient(subscriptionID string) FileServicesClient {
+ return original.NewFileServicesClient(subscriptionID)
+}
+func NewFileServicesClientWithBaseURI(baseURI string, subscriptionID string) FileServicesClient {
+ return original.NewFileServicesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewFileShareItemsIterator(page FileShareItemsPage) FileShareItemsIterator {
+ return original.NewFileShareItemsIterator(page)
+}
+func NewFileShareItemsPage(getNextPage func(context.Context, FileShareItems) (FileShareItems, error)) FileShareItemsPage {
+ return original.NewFileShareItemsPage(getNextPage)
+}
+func NewFileSharesClient(subscriptionID string) FileSharesClient {
+ return original.NewFileSharesClient(subscriptionID)
+}
+func NewFileSharesClientWithBaseURI(baseURI string, subscriptionID string) FileSharesClient {
+ return original.NewFileSharesClientWithBaseURI(baseURI, subscriptionID)
+}
func NewListContainerItemsIterator(page ListContainerItemsPage) ListContainerItemsIterator {
return original.NewListContainerItemsIterator(page)
}
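
Editor's note: the storage profile gains file-service and file-share clients. A minimal sketch constructing them with a placeholder subscription ID:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/latest/storage/mgmt/storage"
)

func main() {
	// Placeholder subscription ID, for illustration only.
	fileServices := storage.NewFileServicesClient("00000000-0000-0000-0000-000000000000")
	fileShares := storage.NewFileSharesClient("00000000-0000-0000-0000-000000000000")
	fmt.Printf("constructed %T and %T\n", fileServices, fileShares)
}
```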
diff --git a/profiles/latest/storage/mgmt/storage/storageapi/models.go b/profiles/latest/storage/mgmt/storage/storageapi/models.go
index c11d9c8b5aa7..d9a8d0c91f8f 100644
--- a/profiles/latest/storage/mgmt/storage/storageapi/models.go
+++ b/profiles/latest/storage/mgmt/storage/storageapi/models.go
@@ -24,6 +24,8 @@ import original "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04
type AccountsClientAPI = original.AccountsClientAPI
type BlobContainersClientAPI = original.BlobContainersClientAPI
type BlobServicesClientAPI = original.BlobServicesClientAPI
+type FileServicesClientAPI = original.FileServicesClientAPI
+type FileSharesClientAPI = original.FileSharesClientAPI
type ManagementPoliciesClientAPI = original.ManagementPoliciesClientAPI
type OperationsClientAPI = original.OperationsClientAPI
type SkusClientAPI = original.SkusClientAPI
diff --git a/profiles/preview/appinsights/mgmt/insights/models.go b/profiles/preview/appinsights/mgmt/insights/models.go
index 609896b561ec..a4390fd072bc 100644
--- a/profiles/preview/appinsights/mgmt/insights/models.go
+++ b/profiles/preview/appinsights/mgmt/insights/models.go
@@ -190,7 +190,6 @@ type OperationListResultIterator = original.OperationListResultIterator
type OperationListResultPage = original.OperationListResultPage
type OperationsClient = original.OperationsClient
type ProactiveDetectionConfigurationsClient = original.ProactiveDetectionConfigurationsClient
-type SetObject = original.SetObject
type TagsResource = original.TagsResource
type WebTest = original.WebTest
type WebTestGeolocation = original.WebTestGeolocation
diff --git a/profiles/preview/batch/batch/batchapi/models.go b/profiles/preview/batch/batch/batchapi/models.go
index a868766ef3b4..2454cbf25796 100644
--- a/profiles/preview/batch/batch/batchapi/models.go
+++ b/profiles/preview/batch/batch/batchapi/models.go
@@ -19,7 +19,7 @@
package batchapi
-import original "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch/batchapi"
+import original "github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch/batchapi"
type AccountClientAPI = original.AccountClientAPI
type ApplicationClientAPI = original.ApplicationClientAPI
diff --git a/profiles/preview/batch/batch/models.go b/profiles/preview/batch/batch/models.go
index e69de20fca8c..f42e12232c6d 100644
--- a/profiles/preview/batch/batch/models.go
+++ b/profiles/preview/batch/batch/models.go
@@ -22,7 +22,7 @@ package batch
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/batch/2019-06-01.9.0/batch"
+ original "github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
)
type AccessScope = original.AccessScope
@@ -371,7 +371,10 @@ type AutoPoolSpecification = original.AutoPoolSpecification
type AutoScaleRun = original.AutoScaleRun
type AutoScaleRunError = original.AutoScaleRunError
type AutoUserSpecification = original.AutoUserSpecification
+type AzureBlobFileSystemConfiguration = original.AzureBlobFileSystemConfiguration
+type AzureFileShareConfiguration = original.AzureFileShareConfiguration
type BaseClient = original.BaseClient
+type CIFSMountConfiguration = original.CIFSMountConfiguration
type Certificate = original.Certificate
type CertificateAddParameter = original.CertificateAddParameter
type CertificateClient = original.CertificateClient
@@ -454,7 +457,9 @@ type JobTerminateParameter = original.JobTerminateParameter
type JobUpdateParameter = original.JobUpdateParameter
type LinuxUserConfiguration = original.LinuxUserConfiguration
type MetadataItem = original.MetadataItem
+type MountConfiguration = original.MountConfiguration
type MultiInstanceSettings = original.MultiInstanceSettings
+type NFSMountConfiguration = original.NFSMountConfiguration
type NameValuePair = original.NameValuePair
type NetworkConfiguration = original.NetworkConfiguration
type NetworkSecurityGroupRule = original.NetworkSecurityGroupRule
diff --git a/profiles/preview/batch/mgmt/batch/batchapi/models.go b/profiles/preview/batch/mgmt/batch/batchapi/models.go
index d3cd8550e0ef..0a26813bd53e 100644
--- a/profiles/preview/batch/mgmt/batch/batchapi/models.go
+++ b/profiles/preview/batch/mgmt/batch/batchapi/models.go
@@ -19,7 +19,7 @@
package batchapi
-import original "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-04-01/batch/batchapi"
+import original "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch/batchapi"
type AccountClientAPI = original.AccountClientAPI
type ApplicationClientAPI = original.ApplicationClientAPI
diff --git a/profiles/preview/batch/mgmt/batch/models.go b/profiles/preview/batch/mgmt/batch/models.go
index 67157bb631b0..e8456e1022a6 100644
--- a/profiles/preview/batch/mgmt/batch/models.go
+++ b/profiles/preview/batch/mgmt/batch/models.go
@@ -22,7 +22,7 @@ package batch
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-04-01/batch"
+ original "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch"
)
const (
@@ -105,6 +105,13 @@ const (
Spread ComputeNodeFillType = original.Spread
)
+type ContainerWorkingDirectory = original.ContainerWorkingDirectory
+
+const (
+ ContainerImageDefault ContainerWorkingDirectory = original.ContainerImageDefault
+ TaskWorkingDirectory ContainerWorkingDirectory = original.TaskWorkingDirectory
+)
+
type ElevationLevel = original.ElevationLevel
const (
@@ -214,7 +221,10 @@ type AutoScaleSettings = original.AutoScaleSettings
type AutoStorageBaseProperties = original.AutoStorageBaseProperties
type AutoStorageProperties = original.AutoStorageProperties
type AutoUserSpecification = original.AutoUserSpecification
+type AzureBlobFileSystemConfiguration = original.AzureBlobFileSystemConfiguration
+type AzureFileShareConfiguration = original.AzureFileShareConfiguration
type BaseClient = original.BaseClient
+type CIFSMountConfiguration = original.CIFSMountConfiguration
type Certificate = original.Certificate
type CertificateBaseProperties = original.CertificateBaseProperties
type CertificateClient = original.CertificateClient
@@ -255,6 +265,8 @@ type ListPoolsResultPage = original.ListPoolsResultPage
type LocationClient = original.LocationClient
type LocationQuota = original.LocationQuota
type MetadataItem = original.MetadataItem
+type MountConfiguration = original.MountConfiguration
+type NFSMountConfiguration = original.NFSMountConfiguration
type NetworkConfiguration = original.NetworkConfiguration
type NetworkSecurityGroupRule = original.NetworkSecurityGroupRule
type Operation = original.Operation
@@ -399,6 +411,9 @@ func PossibleComputeNodeDeallocationOptionValues() []ComputeNodeDeallocationOpti
func PossibleComputeNodeFillTypeValues() []ComputeNodeFillType {
return original.PossibleComputeNodeFillTypeValues()
}
+func PossibleContainerWorkingDirectoryValues() []ContainerWorkingDirectory {
+ return original.PossibleContainerWorkingDirectoryValues()
+}
func PossibleElevationLevelValues() []ElevationLevel {
return original.PossibleElevationLevelValues()
}
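
Editor's note: the batch management profile (now on 2019-08-01) adds the `ContainerWorkingDirectory` enum and the mount-configuration type aliases. A minimal sketch of the new enum:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/batch/mgmt/batch"
)

func main() {
	// ContainerWorkingDirectory is new in the 2019-08-01 batch management API.
	var wd batch.ContainerWorkingDirectory = batch.TaskWorkingDirectory
	fmt.Println(wd, batch.PossibleContainerWorkingDirectoryValues())
}
```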
diff --git a/profiles/preview/cognitiveservices/computervision/computervisionapi/models.go b/profiles/preview/cognitiveservices/computervision/computervisionapi/models.go
index f6a25fe21a6e..27a1ed2e9e90 100644
--- a/profiles/preview/cognitiveservices/computervision/computervisionapi/models.go
+++ b/profiles/preview/cognitiveservices/computervision/computervisionapi/models.go
@@ -19,6 +19,6 @@
package computervisionapi
-import original "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.0/computervision/computervisionapi"
+import original "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.1/computervision/computervisionapi"
type BaseClientAPI = original.BaseClientAPI
diff --git a/profiles/preview/cognitiveservices/computervision/models.go b/profiles/preview/cognitiveservices/computervision/models.go
index fa380eabb136..c2b072a0ed70 100644
--- a/profiles/preview/cognitiveservices/computervision/models.go
+++ b/profiles/preview/cognitiveservices/computervision/models.go
@@ -19,13 +19,20 @@
package computervision
-import original "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.0/computervision"
+import original "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.1/computervision"
+
+type DescriptionExclude = original.DescriptionExclude
+
+const (
+ Celebrities DescriptionExclude = original.Celebrities
+ Landmarks DescriptionExclude = original.Landmarks
+)
type Details = original.Details
const (
- Celebrities Details = original.Celebrities
- Landmarks Details = original.Landmarks
+ DetailsCelebrities Details = original.DetailsCelebrities
+ DetailsLandmarks Details = original.DetailsLandmarks
)
type Gender = original.Gender
@@ -158,6 +165,9 @@ func New(endpoint string) BaseClient {
func NewWithoutDefaults(endpoint string) BaseClient {
return original.NewWithoutDefaults(endpoint)
}
+func PossibleDescriptionExcludeValues() []DescriptionExclude {
+ return original.PossibleDescriptionExcludeValues()
+}
func PossibleDetailsValues() []Details {
return original.PossibleDetailsValues()
}
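
Editor's note: in the computervision preview profile, `Celebrities`/`Landmarks` now belong to the new `DescriptionExclude` type, so the `Details` constants carry a `Details` prefix. A minimal migration sketch:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/cognitiveservices/computervision"
)

func main() {
	// Details constants are now prefixed; the unprefixed names moved to DescriptionExclude.
	var d computervision.Details = computervision.DetailsCelebrities // was Celebrities
	var e computervision.DescriptionExclude = computervision.Landmarks
	fmt.Println(d, e, computervision.PossibleDescriptionExcludeValues())
}
```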
diff --git a/profiles/preview/cognitiveservices/qnamaker/models.go b/profiles/preview/cognitiveservices/qnamaker/models.go
index 8c67cecde797..4329ee96e118 100644
--- a/profiles/preview/cognitiveservices/qnamaker/models.go
+++ b/profiles/preview/cognitiveservices/qnamaker/models.go
@@ -47,13 +47,6 @@ const (
ValidationFailure ErrorCodeType = original.ValidationFailure
)
-type KnowledgebaseEnvironmentType = original.KnowledgebaseEnvironmentType
-
-const (
- KnowledgebaseEnvironmentTypeProd KnowledgebaseEnvironmentType = original.KnowledgebaseEnvironmentTypeProd
- KnowledgebaseEnvironmentTypeTest KnowledgebaseEnvironmentType = original.KnowledgebaseEnvironmentTypeTest
-)
-
type OperationStateType = original.OperationStateType
const (
@@ -135,9 +128,6 @@ func PossibleEnvironmentTypeValues() []EnvironmentType {
func PossibleErrorCodeTypeValues() []ErrorCodeType {
return original.PossibleErrorCodeTypeValues()
}
-func PossibleKnowledgebaseEnvironmentTypeValues() []KnowledgebaseEnvironmentType {
- return original.PossibleKnowledgebaseEnvironmentTypeValues()
-}
func PossibleOperationStateTypeValues() []OperationStateType {
return original.PossibleOperationStateTypeValues()
}
diff --git a/profiles/preview/compute/mgmt/compute/computeapi/models.go b/profiles/preview/compute/mgmt/compute/computeapi/models.go
index 26b46b42c4b6..dcae23d3b907 100644
--- a/profiles/preview/compute/mgmt/compute/computeapi/models.go
+++ b/profiles/preview/compute/mgmt/compute/computeapi/models.go
@@ -27,6 +27,8 @@ type DedicatedHostGroupsClientAPI = original.DedicatedHostGroupsClientAPI
type DedicatedHostsClientAPI = original.DedicatedHostsClientAPI
type DisksClientAPI = original.DisksClientAPI
type GalleriesClientAPI = original.GalleriesClientAPI
+type GalleryApplicationVersionsClientAPI = original.GalleryApplicationVersionsClientAPI
+type GalleryApplicationsClientAPI = original.GalleryApplicationsClientAPI
type GalleryImageVersionsClientAPI = original.GalleryImageVersionsClientAPI
type GalleryImagesClientAPI = original.GalleryImagesClientAPI
type ImagesClientAPI = original.ImagesClientAPI
diff --git a/profiles/preview/compute/mgmt/compute/models.go b/profiles/preview/compute/mgmt/compute/models.go
index 2def857fdc27..c83bb89dce7c 100644
--- a/profiles/preview/compute/mgmt/compute/models.go
+++ b/profiles/preview/compute/mgmt/compute/models.go
@@ -302,6 +302,17 @@ const (
ProvisioningState2Updating ProvisioningState2 = original.ProvisioningState2Updating
)
+type ProvisioningState3 = original.ProvisioningState3
+
+const (
+ ProvisioningState3Creating ProvisioningState3 = original.ProvisioningState3Creating
+ ProvisioningState3Deleting ProvisioningState3 = original.ProvisioningState3Deleting
+ ProvisioningState3Failed ProvisioningState3 = original.ProvisioningState3Failed
+ ProvisioningState3Migrating ProvisioningState3 = original.ProvisioningState3Migrating
+ ProvisioningState3Succeeded ProvisioningState3 = original.ProvisioningState3Succeeded
+ ProvisioningState3Updating ProvisioningState3 = original.ProvisioningState3Updating
+)
+
type ProximityPlacementGroupType = original.ProximityPlacementGroupType
const (
@@ -643,6 +654,7 @@ type AvailabilitySetProperties = original.AvailabilitySetProperties
type AvailabilitySetUpdate = original.AvailabilitySetUpdate
type AvailabilitySetsClient = original.AvailabilitySetsClient
type BaseClient = original.BaseClient
+type BillingProfile = original.BillingProfile
type BootDiagnostics = original.BootDiagnostics
type BootDiagnosticsInstanceView = original.BootDiagnosticsInstanceView
type CloudError = original.CloudError
@@ -713,7 +725,25 @@ type GalleriesClient = original.GalleriesClient
type GalleriesCreateOrUpdateFuture = original.GalleriesCreateOrUpdateFuture
type GalleriesDeleteFuture = original.GalleriesDeleteFuture
type Gallery = original.Gallery
+type GalleryApplication = original.GalleryApplication
+type GalleryApplicationList = original.GalleryApplicationList
+type GalleryApplicationListIterator = original.GalleryApplicationListIterator
+type GalleryApplicationListPage = original.GalleryApplicationListPage
+type GalleryApplicationProperties = original.GalleryApplicationProperties
+type GalleryApplicationVersion = original.GalleryApplicationVersion
+type GalleryApplicationVersionList = original.GalleryApplicationVersionList
+type GalleryApplicationVersionListIterator = original.GalleryApplicationVersionListIterator
+type GalleryApplicationVersionListPage = original.GalleryApplicationVersionListPage
+type GalleryApplicationVersionProperties = original.GalleryApplicationVersionProperties
+type GalleryApplicationVersionPublishingProfile = original.GalleryApplicationVersionPublishingProfile
+type GalleryApplicationVersionsClient = original.GalleryApplicationVersionsClient
+type GalleryApplicationVersionsCreateOrUpdateFuture = original.GalleryApplicationVersionsCreateOrUpdateFuture
+type GalleryApplicationVersionsDeleteFuture = original.GalleryApplicationVersionsDeleteFuture
+type GalleryApplicationsClient = original.GalleryApplicationsClient
+type GalleryApplicationsCreateOrUpdateFuture = original.GalleryApplicationsCreateOrUpdateFuture
+type GalleryApplicationsDeleteFuture = original.GalleryApplicationsDeleteFuture
type GalleryArtifactPublishingProfileBase = original.GalleryArtifactPublishingProfileBase
+type GalleryArtifactSource = original.GalleryArtifactSource
type GalleryArtifactVersionSource = original.GalleryArtifactVersionSource
type GalleryDataDiskImage = original.GalleryDataDiskImage
type GalleryDiskImage = original.GalleryDiskImage
@@ -779,6 +809,7 @@ type LogAnalyticsInputBase = original.LogAnalyticsInputBase
type LogAnalyticsOperationResult = original.LogAnalyticsOperationResult
type LogAnalyticsOutput = original.LogAnalyticsOutput
type MaintenanceRedeployStatus = original.MaintenanceRedeployStatus
+type ManagedArtifact = original.ManagedArtifact
type ManagedDiskParameters = original.ManagedDiskParameters
type NetworkInterfaceReference = original.NetworkInterfaceReference
type NetworkInterfaceReferenceProperties = original.NetworkInterfaceReferenceProperties
@@ -835,6 +866,7 @@ type RunCommandParameterDefinition = original.RunCommandParameterDefinition
type RunCommandResult = original.RunCommandResult
type SSHConfiguration = original.SSHConfiguration
type SSHPublicKey = original.SSHPublicKey
+type ScheduledEventsProfile = original.ScheduledEventsProfile
type Sku = original.Sku
type Snapshot = original.Snapshot
type SnapshotList = original.SnapshotList
@@ -855,6 +887,7 @@ type StorageProfile = original.StorageProfile
type SubResource = original.SubResource
type SubResourceReadOnly = original.SubResourceReadOnly
type TargetRegion = original.TargetRegion
+type TerminateNotificationProfile = original.TerminateNotificationProfile
type ThrottledRequestsInput = original.ThrottledRequestsInput
type UpdateResource = original.UpdateResource
type UpgradeOperationHistoricalStatusInfo = original.UpgradeOperationHistoricalStatusInfo
@@ -864,6 +897,7 @@ type UpgradePolicy = original.UpgradePolicy
type Usage = original.Usage
type UsageClient = original.UsageClient
type UsageName = original.UsageName
+type UserArtifactSource = original.UserArtifactSource
type VMScaleSetConvertToSinglePlacementGroupInput = original.VMScaleSetConvertToSinglePlacementGroupInput
type VaultCertificate = original.VaultCertificate
type VaultSecretGroup = original.VaultSecretGroup
@@ -1093,6 +1127,30 @@ func NewGalleriesClient(subscriptionID string) GalleriesClient {
func NewGalleriesClientWithBaseURI(baseURI string, subscriptionID string) GalleriesClient {
return original.NewGalleriesClientWithBaseURI(baseURI, subscriptionID)
}
+func NewGalleryApplicationListIterator(page GalleryApplicationListPage) GalleryApplicationListIterator {
+ return original.NewGalleryApplicationListIterator(page)
+}
+func NewGalleryApplicationListPage(getNextPage func(context.Context, GalleryApplicationList) (GalleryApplicationList, error)) GalleryApplicationListPage {
+ return original.NewGalleryApplicationListPage(getNextPage)
+}
+func NewGalleryApplicationVersionListIterator(page GalleryApplicationVersionListPage) GalleryApplicationVersionListIterator {
+ return original.NewGalleryApplicationVersionListIterator(page)
+}
+func NewGalleryApplicationVersionListPage(getNextPage func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error)) GalleryApplicationVersionListPage {
+ return original.NewGalleryApplicationVersionListPage(getNextPage)
+}
+func NewGalleryApplicationVersionsClient(subscriptionID string) GalleryApplicationVersionsClient {
+ return original.NewGalleryApplicationVersionsClient(subscriptionID)
+}
+func NewGalleryApplicationVersionsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationVersionsClient {
+ return original.NewGalleryApplicationVersionsClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewGalleryApplicationsClient(subscriptionID string) GalleryApplicationsClient {
+ return original.NewGalleryApplicationsClient(subscriptionID)
+}
+func NewGalleryApplicationsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationsClient {
+ return original.NewGalleryApplicationsClientWithBaseURI(baseURI, subscriptionID)
+}
func NewGalleryImageListIterator(page GalleryImageListPage) GalleryImageListIterator {
return original.NewGalleryImageListIterator(page)
}
@@ -1387,6 +1445,9 @@ func PossibleProvisioningState1Values() []ProvisioningState1 {
func PossibleProvisioningState2Values() []ProvisioningState2 {
return original.PossibleProvisioningState2Values()
}
+func PossibleProvisioningState3Values() []ProvisioningState3 {
+ return original.PossibleProvisioningState3Values()
+}
func PossibleProvisioningStateValues() []ProvisioningState {
return original.PossibleProvisioningStateValues()
}
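The compute profile picks up the new gallery application clients. A minimal construction sketch, assuming only the constructors aliased above; the subscription ID is a placeholder, and real use would also configure an authorizer before issuing requests:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/compute/mgmt/compute"
)

func main() {
	subID := "00000000-0000-0000-0000-000000000000" // placeholder subscription ID

	apps := compute.NewGalleryApplicationsClient(subID)
	versions := compute.NewGalleryApplicationVersionsClient(subID)

	// Printing the concrete types is enough to show the profile aliases resolve
	// to the 2019-07-01 compute package.
	fmt.Printf("%T\n%T\n", apps, versions)
}
```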
diff --git a/profiles/preview/containerservice/mgmt/containerservice/containerserviceapi/models.go b/profiles/preview/containerservice/mgmt/containerservice/containerserviceapi/models.go
index fbc727a11a46..2529a9c3888f 100644
--- a/profiles/preview/containerservice/mgmt/containerservice/containerserviceapi/models.go
+++ b/profiles/preview/containerservice/mgmt/containerservice/containerserviceapi/models.go
@@ -19,7 +19,7 @@
package containerserviceapi
-import original "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-06-01/containerservice/containerserviceapi"
+import original "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice/containerserviceapi"
type AgentPoolsClientAPI = original.AgentPoolsClientAPI
type ContainerServicesClientAPI = original.ContainerServicesClientAPI
diff --git a/profiles/preview/containerservice/mgmt/containerservice/models.go b/profiles/preview/containerservice/mgmt/containerservice/models.go
index 2b6f0f228781..eb51fbccee19 100644
--- a/profiles/preview/containerservice/mgmt/containerservice/models.go
+++ b/profiles/preview/containerservice/mgmt/containerservice/models.go
@@ -22,7 +22,7 @@ package containerservice
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-06-01/containerservice"
+ original "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
)
const (
@@ -367,6 +367,7 @@ type ListResultIterator = original.ListResultIterator
type ListResultPage = original.ListResultPage
type ManagedCluster = original.ManagedCluster
type ManagedClusterAADProfile = original.ManagedClusterAADProfile
+type ManagedClusterAPIServerAccessProfile = original.ManagedClusterAPIServerAccessProfile
type ManagedClusterAccessProfile = original.ManagedClusterAccessProfile
type ManagedClusterAddonProfile = original.ManagedClusterAddonProfile
type ManagedClusterAgentPoolProfile = original.ManagedClusterAgentPoolProfile
@@ -375,6 +376,10 @@ type ManagedClusterIdentity = original.ManagedClusterIdentity
type ManagedClusterListResult = original.ManagedClusterListResult
type ManagedClusterListResultIterator = original.ManagedClusterListResultIterator
type ManagedClusterListResultPage = original.ManagedClusterListResultPage
+type ManagedClusterLoadBalancerProfile = original.ManagedClusterLoadBalancerProfile
+type ManagedClusterLoadBalancerProfileManagedOutboundIPs = original.ManagedClusterLoadBalancerProfileManagedOutboundIPs
+type ManagedClusterLoadBalancerProfileOutboundIPPrefixes = original.ManagedClusterLoadBalancerProfileOutboundIPPrefixes
+type ManagedClusterLoadBalancerProfileOutboundIPs = original.ManagedClusterLoadBalancerProfileOutboundIPs
type ManagedClusterPoolUpgradeProfile = original.ManagedClusterPoolUpgradeProfile
type ManagedClusterPoolUpgradeProfileUpgradesItem = original.ManagedClusterPoolUpgradeProfileUpgradesItem
type ManagedClusterProperties = original.ManagedClusterProperties
@@ -419,6 +424,7 @@ type OrchestratorVersionProfileProperties = original.OrchestratorVersionProfileP
type Properties = original.Properties
type PurchasePlan = original.PurchasePlan
type Resource = original.Resource
+type ResourceReference = original.ResourceReference
type SSHConfiguration = original.SSHConfiguration
type SSHPublicKey = original.SSHPublicKey
type ServicePrincipalProfile = original.ServicePrincipalProfile
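With the move to 2019-08-01, the containerservice profile exposes the API server access and load balancer profile types. A sketch that only instantiates the newly aliased types; no fields are set, since their shapes are not shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/containerservice/mgmt/containerservice"
)

func main() {
	access := containerservice.ManagedClusterAPIServerAccessProfile{}
	lb := containerservice.ManagedClusterLoadBalancerProfile{}
	fmt.Printf("%T\n%T\n", access, lb)
}
```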
diff --git a/profiles/preview/datafactory/mgmt/datafactory/models.go b/profiles/preview/datafactory/mgmt/datafactory/models.go
index 753d632f3246..5b7373730a53 100644
--- a/profiles/preview/datafactory/mgmt/datafactory/models.go
+++ b/profiles/preview/datafactory/mgmt/datafactory/models.go
@@ -46,6 +46,16 @@ const (
AuthorizationTypeRBAC AuthorizationType = original.AuthorizationTypeRBAC
)
+type AvroCompressionCodec = original.AvroCompressionCodec
+
+const (
+ Bzip2 AvroCompressionCodec = original.Bzip2
+ Deflate AvroCompressionCodec = original.Deflate
+ None AvroCompressionCodec = original.None
+ Snappy AvroCompressionCodec = original.Snappy
+ Xz AvroCompressionCodec = original.Xz
+)
+
type AzureFunctionActivityMethod = original.AzureFunctionActivityMethod
const (
@@ -140,6 +150,16 @@ const (
OnPremisesWithIfd DynamicsDeploymentType = original.OnPremisesWithIfd
)
+type EventSubscriptionStatus = original.EventSubscriptionStatus
+
+const (
+ Deprovisioning EventSubscriptionStatus = original.Deprovisioning
+ Disabled EventSubscriptionStatus = original.Disabled
+ Enabled EventSubscriptionStatus = original.Enabled
+ Provisioning EventSubscriptionStatus = original.Provisioning
+ Unknown EventSubscriptionStatus = original.Unknown
+)
+
type FtpAuthenticationType = original.FtpAuthenticationType
const (
@@ -171,9 +191,9 @@ const (
type HDInsightActivityDebugInfoOption = original.HDInsightActivityDebugInfoOption
const (
- Always HDInsightActivityDebugInfoOption = original.Always
- Failure HDInsightActivityDebugInfoOption = original.Failure
- None HDInsightActivityDebugInfoOption = original.None
+ HDInsightActivityDebugInfoOptionAlways HDInsightActivityDebugInfoOption = original.HDInsightActivityDebugInfoOptionAlways
+ HDInsightActivityDebugInfoOptionFailure HDInsightActivityDebugInfoOption = original.HDInsightActivityDebugInfoOptionFailure
+ HDInsightActivityDebugInfoOptionNone HDInsightActivityDebugInfoOption = original.HDInsightActivityDebugInfoOptionNone
)
type HTTPAuthenticationType = original.HTTPAuthenticationType
@@ -301,6 +321,13 @@ const (
IntegrationRuntimeUpdateResultSucceed IntegrationRuntimeUpdateResult = original.IntegrationRuntimeUpdateResultSucceed
)
+type JSONWriteFilePattern = original.JSONWriteFilePattern
+
+const (
+ ArrayOfObjects JSONWriteFilePattern = original.ArrayOfObjects
+ SetOfObjects JSONWriteFilePattern = original.SetOfObjects
+)
+
type ManagedIntegrationRuntimeNodeStatus = original.ManagedIntegrationRuntimeNodeStatus
const (
@@ -603,9 +630,9 @@ const (
type TriggerRuntimeState = original.TriggerRuntimeState
const (
- Disabled TriggerRuntimeState = original.Disabled
- Started TriggerRuntimeState = original.Started
- Stopped TriggerRuntimeState = original.Stopped
+ TriggerRuntimeStateDisabled TriggerRuntimeState = original.TriggerRuntimeStateDisabled
+ TriggerRuntimeStateStarted TriggerRuntimeState = original.TriggerRuntimeStateStarted
+ TriggerRuntimeStateStopped TriggerRuntimeState = original.TriggerRuntimeStateStopped
)
type TumblingWindowFrequency = original.TumblingWindowFrequency
@@ -665,9 +692,11 @@ const (
type TypeBasicCopySink = original.TypeBasicCopySink
const (
+ TypeAvroSink TypeBasicCopySink = original.TypeAvroSink
TypeAzureBlobFSSink TypeBasicCopySink = original.TypeAzureBlobFSSink
TypeAzureDataExplorerSink TypeBasicCopySink = original.TypeAzureDataExplorerSink
TypeAzureDataLakeStoreSink TypeBasicCopySink = original.TypeAzureDataLakeStoreSink
+ TypeAzureMySQLSink TypeBasicCopySink = original.TypeAzureMySQLSink
TypeAzurePostgreSQLSink TypeBasicCopySink = original.TypeAzurePostgreSQLSink
TypeAzureQueueSink TypeBasicCopySink = original.TypeAzureQueueSink
TypeAzureSearchIndexSink TypeBasicCopySink = original.TypeAzureSearchIndexSink
@@ -684,6 +713,7 @@ const (
TypeDynamicsSink TypeBasicCopySink = original.TypeDynamicsSink
TypeFileSystemSink TypeBasicCopySink = original.TypeFileSystemSink
TypeInformixSink TypeBasicCopySink = original.TypeInformixSink
+ TypeJSONSink TypeBasicCopySink = original.TypeJSONSink
TypeMicrosoftAccessSink TypeBasicCopySink = original.TypeMicrosoftAccessSink
TypeOdbcSink TypeBasicCopySink = original.TypeOdbcSink
TypeOracleSink TypeBasicCopySink = original.TypeOracleSink
@@ -702,9 +732,11 @@ type TypeBasicCopySource = original.TypeBasicCopySource
const (
TypeAmazonMWSSource TypeBasicCopySource = original.TypeAmazonMWSSource
TypeAmazonRedshiftSource TypeBasicCopySource = original.TypeAmazonRedshiftSource
+ TypeAvroSource TypeBasicCopySource = original.TypeAvroSource
TypeAzureBlobFSSource TypeBasicCopySource = original.TypeAzureBlobFSSource
TypeAzureDataExplorerSource TypeBasicCopySource = original.TypeAzureDataExplorerSource
TypeAzureDataLakeStoreSource TypeBasicCopySource = original.TypeAzureDataLakeStoreSource
+ TypeAzureMariaDBSource TypeBasicCopySource = original.TypeAzureMariaDBSource
TypeAzureMySQLSource TypeBasicCopySource = original.TypeAzureMySQLSource
TypeAzurePostgreSQLSource TypeBasicCopySource = original.TypeAzurePostgreSQLSource
TypeAzureSQLSource TypeBasicCopySource = original.TypeAzureSQLSource
@@ -737,6 +769,7 @@ const (
TypeImpalaSource TypeBasicCopySource = original.TypeImpalaSource
TypeInformixSource TypeBasicCopySource = original.TypeInformixSource
TypeJiraSource TypeBasicCopySource = original.TypeJiraSource
+ TypeJSONSource TypeBasicCopySource = original.TypeJSONSource
TypeMagentoSource TypeBasicCopySource = original.TypeMagentoSource
TypeMariaDBSource TypeBasicCopySource = original.TypeMariaDBSource
TypeMarketoSource TypeBasicCopySource = original.TypeMarketoSource
@@ -762,6 +795,7 @@ const (
TypeSalesforceMarketingCloudSource TypeBasicCopySource = original.TypeSalesforceMarketingCloudSource
TypeSalesforceServiceCloudSource TypeBasicCopySource = original.TypeSalesforceServiceCloudSource
TypeSalesforceSource TypeBasicCopySource = original.TypeSalesforceSource
+ TypeSapBwSource TypeBasicCopySource = original.TypeSapBwSource
TypeSapCloudForCustomerSource TypeBasicCopySource = original.TypeSapCloudForCustomerSource
TypeSapEccSource TypeBasicCopySource = original.TypeSapEccSource
TypeSapHanaSource TypeBasicCopySource = original.TypeSapHanaSource
@@ -787,11 +821,14 @@ type TypeBasicDataset = original.TypeBasicDataset
const (
TypeAmazonMWSObject TypeBasicDataset = original.TypeAmazonMWSObject
+ TypeAmazonRedshiftTable TypeBasicDataset = original.TypeAmazonRedshiftTable
TypeAmazonS3Object TypeBasicDataset = original.TypeAmazonS3Object
+ TypeAvro TypeBasicDataset = original.TypeAvro
TypeAzureBlob TypeBasicDataset = original.TypeAzureBlob
TypeAzureBlobFSFile TypeBasicDataset = original.TypeAzureBlobFSFile
TypeAzureDataExplorerTable TypeBasicDataset = original.TypeAzureDataExplorerTable
TypeAzureDataLakeStoreFile TypeBasicDataset = original.TypeAzureDataLakeStoreFile
+ TypeAzureMariaDBTable TypeBasicDataset = original.TypeAzureMariaDBTable
TypeAzureMySQLTable TypeBasicDataset = original.TypeAzureMySQLTable
TypeAzurePostgreSQLTable TypeBasicDataset = original.TypeAzurePostgreSQLTable
TypeAzureSearchIndex TypeBasicDataset = original.TypeAzureSearchIndex
@@ -807,6 +844,7 @@ const (
TypeCouchbaseTable TypeBasicDataset = original.TypeCouchbaseTable
TypeCustomDataset TypeBasicDataset = original.TypeCustomDataset
TypeDataset TypeBasicDataset = original.TypeDataset
+ TypeDb2Table TypeBasicDataset = original.TypeDb2Table
TypeDelimitedText TypeBasicDataset = original.TypeDelimitedText
TypeDocumentDbCollection TypeBasicDataset = original.TypeDocumentDbCollection
TypeDrillTable TypeBasicDataset = original.TypeDrillTable
@@ -825,6 +863,7 @@ const (
TypeImpalaObject TypeBasicDataset = original.TypeImpalaObject
TypeInformixTable TypeBasicDataset = original.TypeInformixTable
TypeJiraObject TypeBasicDataset = original.TypeJiraObject
+ TypeJSON TypeBasicDataset = original.TypeJSON
TypeMagentoObject TypeBasicDataset = original.TypeMagentoObject
TypeMariaDBTable TypeBasicDataset = original.TypeMariaDBTable
TypeMarketoObject TypeBasicDataset = original.TypeMarketoObject
@@ -850,6 +889,7 @@ const (
TypeSalesforceMarketingCloudObject TypeBasicDataset = original.TypeSalesforceMarketingCloudObject
TypeSalesforceObject TypeBasicDataset = original.TypeSalesforceObject
TypeSalesforceServiceCloudObject TypeBasicDataset = original.TypeSalesforceServiceCloudObject
+ TypeSapBwCube TypeBasicDataset = original.TypeSapBwCube
TypeSapCloudForCustomerResource TypeBasicDataset = original.TypeSapCloudForCustomerResource
TypeSapEccResource TypeBasicDataset = original.TypeSapEccResource
TypeSapHanaTable TypeBasicDataset = original.TypeSapHanaTable
@@ -860,6 +900,7 @@ const (
TypeSparkObject TypeBasicDataset = original.TypeSparkObject
TypeSQLServerTable TypeBasicDataset = original.TypeSQLServerTable
TypeSquareObject TypeBasicDataset = original.TypeSquareObject
+ TypeSybaseTable TypeBasicDataset = original.TypeSybaseTable
TypeTeradataTable TypeBasicDataset = original.TypeTeradataTable
TypeVerticaTable TypeBasicDataset = original.TypeVerticaTable
TypeWebTable TypeBasicDataset = original.TypeWebTable
@@ -936,6 +977,7 @@ const (
TypeAzureDataLakeStore TypeBasicLinkedService = original.TypeAzureDataLakeStore
TypeAzureFunction TypeBasicLinkedService = original.TypeAzureFunction
TypeAzureKeyVault TypeBasicLinkedService = original.TypeAzureKeyVault
+ TypeAzureMariaDB TypeBasicLinkedService = original.TypeAzureMariaDB
TypeAzureML TypeBasicLinkedService = original.TypeAzureML
TypeAzureMySQL TypeBasicLinkedService = original.TypeAzureMySQL
TypeAzurePostgreSQL TypeBasicLinkedService = original.TypeAzurePostgreSQL
@@ -1027,6 +1069,16 @@ const (
TypeSsisObjectMetadata TypeBasicSsisObjectMetadata = original.TypeSsisObjectMetadata
)
+type TypeBasicStoreWriteSettings = original.TypeBasicStoreWriteSettings
+
+const (
+ TypeAzureBlobFSWriteSettings TypeBasicStoreWriteSettings = original.TypeAzureBlobFSWriteSettings
+ TypeAzureBlobStorageWriteSettings TypeBasicStoreWriteSettings = original.TypeAzureBlobStorageWriteSettings
+ TypeAzureDataLakeStoreWriteSettings TypeBasicStoreWriteSettings = original.TypeAzureDataLakeStoreWriteSettings
+ TypeFileServerWriteSettings TypeBasicStoreWriteSettings = original.TypeFileServerWriteSettings
+ TypeStoreWriteSettings TypeBasicStoreWriteSettings = original.TypeStoreWriteSettings
+)
+
type TypeBasicTrigger = original.TypeBasicTrigger
const (
@@ -1076,6 +1128,8 @@ type AmazonMWSSource = original.AmazonMWSSource
type AmazonRedshiftLinkedService = original.AmazonRedshiftLinkedService
type AmazonRedshiftLinkedServiceTypeProperties = original.AmazonRedshiftLinkedServiceTypeProperties
type AmazonRedshiftSource = original.AmazonRedshiftSource
+type AmazonRedshiftTableDataset = original.AmazonRedshiftTableDataset
+type AmazonRedshiftTableDatasetTypeProperties = original.AmazonRedshiftTableDatasetTypeProperties
type AmazonS3Dataset = original.AmazonS3Dataset
type AmazonS3DatasetTypeProperties = original.AmazonS3DatasetTypeProperties
type AmazonS3LinkedService = original.AmazonS3LinkedService
@@ -1084,7 +1138,12 @@ type AmazonS3Location = original.AmazonS3Location
type AmazonS3ReadSettings = original.AmazonS3ReadSettings
type AppendVariableActivity = original.AppendVariableActivity
type AppendVariableActivityTypeProperties = original.AppendVariableActivityTypeProperties
+type AvroDataset = original.AvroDataset
+type AvroDatasetTypeProperties = original.AvroDatasetTypeProperties
type AvroFormat = original.AvroFormat
+type AvroSink = original.AvroSink
+type AvroSource = original.AvroSource
+type AvroWriteSettings = original.AvroWriteSettings
type AzureBatchLinkedService = original.AzureBatchLinkedService
type AzureBatchLinkedServiceTypeProperties = original.AzureBatchLinkedServiceTypeProperties
type AzureBlobDataset = original.AzureBlobDataset
@@ -1138,8 +1197,13 @@ type AzureMLLinkedServiceTypeProperties = original.AzureMLLinkedServiceTypePrope
type AzureMLUpdateResourceActivity = original.AzureMLUpdateResourceActivity
type AzureMLUpdateResourceActivityTypeProperties = original.AzureMLUpdateResourceActivityTypeProperties
type AzureMLWebServiceFile = original.AzureMLWebServiceFile
+type AzureMariaDBLinkedService = original.AzureMariaDBLinkedService
+type AzureMariaDBLinkedServiceTypeProperties = original.AzureMariaDBLinkedServiceTypeProperties
+type AzureMariaDBSource = original.AzureMariaDBSource
+type AzureMariaDBTableDataset = original.AzureMariaDBTableDataset
type AzureMySQLLinkedService = original.AzureMySQLLinkedService
type AzureMySQLLinkedServiceTypeProperties = original.AzureMySQLLinkedServiceTypeProperties
+type AzureMySQLSink = original.AzureMySQLSink
type AzureMySQLSource = original.AzureMySQLSource
type AzureMySQLTableDataset = original.AzureMySQLTableDataset
type AzureMySQLTableDatasetTypeProperties = original.AzureMySQLTableDatasetTypeProperties
@@ -1194,6 +1258,7 @@ type BasicLinkedService = original.BasicLinkedService
type BasicMultiplePipelineTrigger = original.BasicMultiplePipelineTrigger
type BasicSecretBase = original.BasicSecretBase
type BasicSsisObjectMetadata = original.BasicSsisObjectMetadata
+type BasicStoreWriteSettings = original.BasicStoreWriteSettings
type BasicTrigger = original.BasicTrigger
type BasicTriggerDependencyReference = original.BasicTriggerDependencyReference
type BasicWebLinkedServiceTypeProperties = original.BasicWebLinkedServiceTypeProperties
@@ -1274,6 +1339,8 @@ type DatasetsClient = original.DatasetsClient
type Db2LinkedService = original.Db2LinkedService
type Db2LinkedServiceTypeProperties = original.Db2LinkedServiceTypeProperties
type Db2Source = original.Db2Source
+type Db2TableDataset = original.Db2TableDataset
+type Db2TableDatasetTypeProperties = original.Db2TableDatasetTypeProperties
type DeleteActivity = original.DeleteActivity
type DeleteActivityTypeProperties = original.DeleteActivityTypeProperties
type DelimitedTextDataset = original.DelimitedTextDataset
@@ -1288,6 +1355,7 @@ type DocumentDbCollectionDataset = original.DocumentDbCollectionDataset
type DocumentDbCollectionDatasetTypeProperties = original.DocumentDbCollectionDatasetTypeProperties
type DocumentDbCollectionSink = original.DocumentDbCollectionSink
type DocumentDbCollectionSource = original.DocumentDbCollectionSource
+type DrillDatasetTypeProperties = original.DrillDatasetTypeProperties
type DrillLinkedService = original.DrillLinkedService
type DrillLinkedServiceTypeProperties = original.DrillLinkedServiceTypeProperties
type DrillSource = original.DrillSource
@@ -1364,10 +1432,12 @@ type GoogleAdWordsLinkedService = original.GoogleAdWordsLinkedService
type GoogleAdWordsLinkedServiceTypeProperties = original.GoogleAdWordsLinkedServiceTypeProperties
type GoogleAdWordsObjectDataset = original.GoogleAdWordsObjectDataset
type GoogleAdWordsSource = original.GoogleAdWordsSource
+type GoogleBigQueryDatasetTypeProperties = original.GoogleBigQueryDatasetTypeProperties
type GoogleBigQueryLinkedService = original.GoogleBigQueryLinkedService
type GoogleBigQueryLinkedServiceTypeProperties = original.GoogleBigQueryLinkedServiceTypeProperties
type GoogleBigQueryObjectDataset = original.GoogleBigQueryObjectDataset
type GoogleBigQuerySource = original.GoogleBigQuerySource
+type GreenplumDatasetTypeProperties = original.GreenplumDatasetTypeProperties
type GreenplumLinkedService = original.GreenplumLinkedService
type GreenplumLinkedServiceTypeProperties = original.GreenplumLinkedServiceTypeProperties
type GreenplumSource = original.GreenplumSource
@@ -1402,6 +1472,7 @@ type HdfsLinkedServiceTypeProperties = original.HdfsLinkedServiceTypeProperties
type HdfsLocation = original.HdfsLocation
type HdfsReadSettings = original.HdfsReadSettings
type HdfsSource = original.HdfsSource
+type HiveDatasetTypeProperties = original.HiveDatasetTypeProperties
type HiveLinkedService = original.HiveLinkedService
type HiveLinkedServiceTypeProperties = original.HiveLinkedServiceTypeProperties
type HiveObjectDataset = original.HiveObjectDataset
@@ -1412,6 +1483,7 @@ type HubspotObjectDataset = original.HubspotObjectDataset
type HubspotSource = original.HubspotSource
type IfConditionActivity = original.IfConditionActivity
type IfConditionActivityTypeProperties = original.IfConditionActivityTypeProperties
+type ImpalaDatasetTypeProperties = original.ImpalaDatasetTypeProperties
type ImpalaLinkedService = original.ImpalaLinkedService
type ImpalaLinkedServiceTypeProperties = original.ImpalaLinkedServiceTypeProperties
type ImpalaObjectDataset = original.ImpalaObjectDataset
@@ -1449,7 +1521,12 @@ type IntegrationRuntimeVNetProperties = original.IntegrationRuntimeVNetPropertie
type IntegrationRuntimesClient = original.IntegrationRuntimesClient
type IntegrationRuntimesStartFuture = original.IntegrationRuntimesStartFuture
type IntegrationRuntimesStopFuture = original.IntegrationRuntimesStopFuture
+type JSONDataset = original.JSONDataset
+type JSONDatasetTypeProperties = original.JSONDatasetTypeProperties
type JSONFormat = original.JSONFormat
+type JSONSink = original.JSONSink
+type JSONSource = original.JSONSource
+type JSONWriteSettings = original.JSONWriteSettings
type JiraLinkedService = original.JiraLinkedService
type JiraLinkedServiceTypeProperties = original.JiraLinkedServiceTypeProperties
type JiraObjectDataset = original.JiraObjectDataset
@@ -1516,6 +1593,7 @@ type NetezzaLinkedServiceTypeProperties = original.NetezzaLinkedServiceTypePrope
type NetezzaPartitionSettings = original.NetezzaPartitionSettings
type NetezzaSource = original.NetezzaSource
type NetezzaTableDataset = original.NetezzaTableDataset
+type NetezzaTableDatasetTypeProperties = original.NetezzaTableDatasetTypeProperties
type ODataLinkedService = original.ODataLinkedService
type ODataLinkedServiceTypeProperties = original.ODataLinkedServiceTypeProperties
type ODataResourceDataset = original.ODataResourceDataset
@@ -1566,6 +1644,7 @@ type PaypalLinkedService = original.PaypalLinkedService
type PaypalLinkedServiceTypeProperties = original.PaypalLinkedServiceTypeProperties
type PaypalObjectDataset = original.PaypalObjectDataset
type PaypalSource = original.PaypalSource
+type PhoenixDatasetTypeProperties = original.PhoenixDatasetTypeProperties
type PhoenixLinkedService = original.PhoenixLinkedService
type PhoenixLinkedServiceTypeProperties = original.PhoenixLinkedServiceTypeProperties
type PhoenixObjectDataset = original.PhoenixObjectDataset
@@ -1588,6 +1667,7 @@ type PostgreSQLLinkedServiceTypeProperties = original.PostgreSQLLinkedServiceTyp
type PostgreSQLSource = original.PostgreSQLSource
type PostgreSQLTableDataset = original.PostgreSQLTableDataset
type PostgreSQLTableDatasetTypeProperties = original.PostgreSQLTableDatasetTypeProperties
+type PrestoDatasetTypeProperties = original.PrestoDatasetTypeProperties
type PrestoLinkedService = original.PrestoLinkedService
type PrestoLinkedServiceTypeProperties = original.PrestoLinkedServiceTypeProperties
type PrestoObjectDataset = original.PrestoObjectDataset
@@ -1668,6 +1748,8 @@ type SalesforceSink = original.SalesforceSink
type SalesforceSource = original.SalesforceSource
type SapBWLinkedService = original.SapBWLinkedService
type SapBWLinkedServiceTypeProperties = original.SapBWLinkedServiceTypeProperties
+type SapBwCubeDataset = original.SapBwCubeDataset
+type SapBwSource = original.SapBwSource
type SapCloudForCustomerLinkedService = original.SapCloudForCustomerLinkedService
type SapCloudForCustomerLinkedServiceTypeProperties = original.SapCloudForCustomerLinkedServiceTypeProperties
type SapCloudForCustomerResourceDataset = original.SapCloudForCustomerResourceDataset
@@ -1721,6 +1803,7 @@ type ShopifyLinkedService = original.ShopifyLinkedService
type ShopifyLinkedServiceTypeProperties = original.ShopifyLinkedServiceTypeProperties
type ShopifyObjectDataset = original.ShopifyObjectDataset
type ShopifySource = original.ShopifySource
+type SparkDatasetTypeProperties = original.SparkDatasetTypeProperties
type SparkLinkedService = original.SparkLinkedService
type SparkLinkedServiceTypeProperties = original.SparkLinkedServiceTypeProperties
type SparkObjectDataset = original.SparkObjectDataset
@@ -1747,6 +1830,8 @@ type SubResource = original.SubResource
type SybaseLinkedService = original.SybaseLinkedService
type SybaseLinkedServiceTypeProperties = original.SybaseLinkedServiceTypeProperties
type SybaseSource = original.SybaseSource
+type SybaseTableDataset = original.SybaseTableDataset
+type SybaseTableDatasetTypeProperties = original.SybaseTableDatasetTypeProperties
type TeradataLinkedService = original.TeradataLinkedService
type TeradataLinkedServiceTypeProperties = original.TeradataLinkedServiceTypeProperties
type TeradataPartitionSettings = original.TeradataPartitionSettings
@@ -1765,9 +1850,12 @@ type TriggerResource = original.TriggerResource
type TriggerRun = original.TriggerRun
type TriggerRunsClient = original.TriggerRunsClient
type TriggerRunsQueryResponse = original.TriggerRunsQueryResponse
+type TriggerSubscriptionOperationStatus = original.TriggerSubscriptionOperationStatus
type TriggersClient = original.TriggersClient
type TriggersStartFuture = original.TriggersStartFuture
type TriggersStopFuture = original.TriggersStopFuture
+type TriggersSubscribeToEventsFuture = original.TriggersSubscribeToEventsFuture
+type TriggersUnsubscribeFromEventsFuture = original.TriggersUnsubscribeFromEventsFuture
type TumblingWindowTrigger = original.TumblingWindowTrigger
type TumblingWindowTriggerDependencyReference = original.TumblingWindowTriggerDependencyReference
type TumblingWindowTriggerTypeProperties = original.TumblingWindowTriggerTypeProperties
@@ -1780,6 +1868,7 @@ type UserProperty = original.UserProperty
type ValidationActivity = original.ValidationActivity
type ValidationActivityTypeProperties = original.ValidationActivityTypeProperties
type VariableSpecification = original.VariableSpecification
+type VerticaDatasetTypeProperties = original.VerticaDatasetTypeProperties
type VerticaLinkedService = original.VerticaLinkedService
type VerticaLinkedServiceTypeProperties = original.VerticaLinkedServiceTypeProperties
type VerticaSource = original.VerticaSource
@@ -1952,6 +2041,9 @@ func PossibleAuthenticationTypeValues() []AuthenticationType {
func PossibleAuthorizationTypeValues() []AuthorizationType {
return original.PossibleAuthorizationTypeValues()
}
+func PossibleAvroCompressionCodecValues() []AvroCompressionCodec {
+ return original.PossibleAvroCompressionCodecValues()
+}
func PossibleAzureFunctionActivityMethodValues() []AzureFunctionActivityMethod {
return original.PossibleAzureFunctionActivityMethodValues()
}
@@ -1982,6 +2074,9 @@ func PossibleDynamicsAuthenticationTypeValues() []DynamicsAuthenticationType {
func PossibleDynamicsDeploymentTypeValues() []DynamicsDeploymentType {
return original.PossibleDynamicsDeploymentTypeValues()
}
+func PossibleEventSubscriptionStatusValues() []EventSubscriptionStatus {
+ return original.PossibleEventSubscriptionStatusValues()
+}
func PossibleFtpAuthenticationTypeValues() []FtpAuthenticationType {
return original.PossibleFtpAuthenticationTypeValues()
}
@@ -2042,6 +2137,9 @@ func PossibleIntegrationRuntimeTypeValues() []IntegrationRuntimeType {
func PossibleIntegrationRuntimeUpdateResultValues() []IntegrationRuntimeUpdateResult {
return original.PossibleIntegrationRuntimeUpdateResultValues()
}
+func PossibleJSONWriteFilePatternValues() []JSONWriteFilePattern {
+ return original.PossibleJSONWriteFilePatternValues()
+}
func PossibleManagedIntegrationRuntimeNodeStatusValues() []ManagedIntegrationRuntimeNodeStatus {
return original.PossibleManagedIntegrationRuntimeNodeStatusValues()
}
@@ -2186,6 +2284,9 @@ func PossibleTypeBasicLinkedServiceValues() []TypeBasicLinkedService {
func PossibleTypeBasicSsisObjectMetadataValues() []TypeBasicSsisObjectMetadata {
return original.PossibleTypeBasicSsisObjectMetadataValues()
}
+func PossibleTypeBasicStoreWriteSettingsValues() []TypeBasicStoreWriteSettings {
+ return original.PossibleTypeBasicStoreWriteSettingsValues()
+}
func PossibleTypeBasicTriggerValues() []TypeBasicTrigger {
return original.PossibleTypeBasicTriggerValues()
}
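Because the new `AvroCompressionCodec` and `EventSubscriptionStatus` enums reuse names such as `None`, `Disabled`, and `Enabled`, the overlapping `HDInsightActivityDebugInfoOption` and `TriggerRuntimeState` constants gain type prefixes. A minimal migration sketch, grounded only in the constants shown above:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/datafactory/mgmt/datafactory"
)

func main() {
	// Previously datafactory.Always / datafactory.None (HDInsightActivityDebugInfoOption).
	debug := datafactory.HDInsightActivityDebugInfoOptionAlways

	// Previously datafactory.Started / datafactory.Disabled (TriggerRuntimeState).
	state := datafactory.TriggerRuntimeStateStarted

	// The unprefixed names now belong to the new enums.
	codec := datafactory.Snappy   // AvroCompressionCodec
	status := datafactory.Enabled // EventSubscriptionStatus

	fmt.Println(debug, state, codec, status)
}
```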
diff --git a/profiles/preview/datamigration/mgmt/datamigration/models.go b/profiles/preview/datamigration/mgmt/datamigration/models.go
index 5353ba766440..5c38ab119fb1 100644
--- a/profiles/preview/datamigration/mgmt/datamigration/models.go
+++ b/profiles/preview/datamigration/mgmt/datamigration/models.go
@@ -83,9 +83,9 @@ const (
type CommandType = original.CommandType
const (
+ CommandTypeCommandProperties CommandType = original.CommandTypeCommandProperties
CommandTypeMigrateSQLServerAzureDbSQLMiComplete CommandType = original.CommandTypeMigrateSQLServerAzureDbSQLMiComplete
CommandTypeMigrateSyncCompleteDatabase CommandType = original.CommandTypeMigrateSyncCompleteDatabase
- CommandTypeUnknown CommandType = original.CommandTypeUnknown
)
type DatabaseCompatLevel = original.DatabaseCompatLevel
@@ -486,7 +486,7 @@ const (
TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS TaskType = original.TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS
TaskTypeMigrateSQLServerAzureSQLDbSync TaskType = original.TaskTypeMigrateSQLServerAzureSQLDbSync
TaskTypeMigrateSQLServerSQLDb TaskType = original.TaskTypeMigrateSQLServerSQLDb
- TaskTypeUnknown TaskType = original.TaskTypeUnknown
+ TaskTypeProjectTaskProperties TaskType = original.TaskTypeProjectTaskProperties
TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI TaskType = original.TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI
TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS TaskType = original.TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS
TaskTypeValidateMigrationInputSQLServerSQLDbSync TaskType = original.TaskTypeValidateMigrationInputSQLServerSQLDbSync
@@ -495,11 +495,11 @@ const (
type Type = original.Type
const (
+ TypeConnectionInfo Type = original.TypeConnectionInfo
TypeMiSQLConnectionInfo Type = original.TypeMiSQLConnectionInfo
TypeMySQLConnectionInfo Type = original.TypeMySQLConnectionInfo
TypePostgreSQLConnectionInfo Type = original.TypePostgreSQLConnectionInfo
TypeSQLConnectionInfo Type = original.TypeSQLConnectionInfo
- TypeUnknown Type = original.TypeUnknown
)
type UpdateActionType = original.UpdateActionType
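The datamigration profile replaces the `*Unknown` discriminator constants with ones named after their base types. A minimal before/after sketch, grounded only in the constants shown above:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/datamigration/mgmt/datamigration"
)

func main() {
	// Previously: CommandTypeUnknown, TaskTypeUnknown, TypeUnknown.
	cmd := datamigration.CommandTypeCommandProperties
	task := datamigration.TaskTypeProjectTaskProperties
	conn := datamigration.TypeConnectionInfo

	fmt.Println(cmd, task, conn)
}
```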
diff --git a/profiles/preview/generate.go b/profiles/preview/generate.go
index 47003edbd7a2..d8d46e5bae01 100644
--- a/profiles/preview/generate.go
+++ b/profiles/preview/generate.go
@@ -14,4 +14,4 @@
package preview
-//go:generate go run ../../tools/profileBuilder/main.go latest --clear-output --name preview --root ../../services --output-location ./ --preview --verbose
+//go:generate go run ../../tools/profileBuilder/main.go latest --clear-output --name preview --root ../../services --output-location ./ --preview
diff --git a/profiles/preview/logic/mgmt/logic/logicapi/models.go b/profiles/preview/logic/mgmt/logic/logicapi/models.go
index fd8780499286..d311caa49361 100644
--- a/profiles/preview/logic/mgmt/logic/logicapi/models.go
+++ b/profiles/preview/logic/mgmt/logic/logicapi/models.go
@@ -29,6 +29,7 @@ type IntegrationAccountBatchConfigurationsClientAPI = original.IntegrationAccoun
type IntegrationAccountsClientAPI = original.IntegrationAccountsClientAPI
type MapsClientAPI = original.MapsClientAPI
type PartnersClientAPI = original.PartnersClientAPI
+type RosettaNetProcessConfigurationsClientAPI = original.RosettaNetProcessConfigurationsClientAPI
type SchemasClientAPI = original.SchemasClientAPI
type SessionsClientAPI = original.SessionsClientAPI
type WorkflowRunActionRepetitionsClientAPI = original.WorkflowRunActionRepetitionsClientAPI
diff --git a/profiles/preview/logic/mgmt/logic/models.go b/profiles/preview/logic/mgmt/logic/models.go
index aeac298216ad..9e459f0dbb09 100644
--- a/profiles/preview/logic/mgmt/logic/models.go
+++ b/profiles/preview/logic/mgmt/logic/models.go
@@ -156,6 +156,15 @@ const (
MapTypeXslt MapType = original.MapTypeXslt
)
+type MessageEncoding = original.MessageEncoding
+
+const (
+ MessageEncodingBase64 MessageEncoding = original.MessageEncodingBase64
+ MessageEncodingEightBit MessageEncoding = original.MessageEncodingEightBit
+ MessageEncodingNotSpecified MessageEncoding = original.MessageEncodingNotSpecified
+ MessageEncodingQuotedPrintable MessageEncoding = original.MessageEncodingQuotedPrintable
+)
+
type MessageFilterType = original.MessageFilterType
const (
@@ -198,6 +207,52 @@ const (
RecurrenceFrequencyYear RecurrenceFrequency = original.RecurrenceFrequencyYear
)
+type RosettaNetActionType = original.RosettaNetActionType
+
+const (
+ RosettaNetActionTypeDoubleAction RosettaNetActionType = original.RosettaNetActionTypeDoubleAction
+ RosettaNetActionTypeNotSpecified RosettaNetActionType = original.RosettaNetActionTypeNotSpecified
+ RosettaNetActionTypeSingleAction RosettaNetActionType = original.RosettaNetActionTypeSingleAction
+)
+
+type RosettaNetPipActivityType = original.RosettaNetPipActivityType
+
+const (
+ RosettaNetPipActivityTypeBusinessTransaction RosettaNetPipActivityType = original.RosettaNetPipActivityTypeBusinessTransaction
+ RosettaNetPipActivityTypeInformationDistribution RosettaNetPipActivityType = original.RosettaNetPipActivityTypeInformationDistribution
+ RosettaNetPipActivityTypeNotification RosettaNetPipActivityType = original.RosettaNetPipActivityTypeNotification
+ RosettaNetPipActivityTypeNotSpecified RosettaNetPipActivityType = original.RosettaNetPipActivityTypeNotSpecified
+ RosettaNetPipActivityTypeQueryResponse RosettaNetPipActivityType = original.RosettaNetPipActivityTypeQueryResponse
+ RosettaNetPipActivityTypeRequestConfirm RosettaNetPipActivityType = original.RosettaNetPipActivityTypeRequestConfirm
+ RosettaNetPipActivityTypeRequestResponse RosettaNetPipActivityType = original.RosettaNetPipActivityTypeRequestResponse
+)
+
+type RosettaNetPipConfidentialityScope = original.RosettaNetPipConfidentialityScope
+
+const (
+ RosettaNetPipConfidentialityScopeNone RosettaNetPipConfidentialityScope = original.RosettaNetPipConfidentialityScopeNone
+ RosettaNetPipConfidentialityScopeNotSpecified RosettaNetPipConfidentialityScope = original.RosettaNetPipConfidentialityScopeNotSpecified
+ RosettaNetPipConfidentialityScopePayload RosettaNetPipConfidentialityScope = original.RosettaNetPipConfidentialityScopePayload
+ RosettaNetPipConfidentialityScopePayloadContainer RosettaNetPipConfidentialityScope = original.RosettaNetPipConfidentialityScopePayloadContainer
+)
+
+type RosettaNetPipRoleType = original.RosettaNetPipRoleType
+
+const (
+ RosettaNetPipRoleTypeEmployee RosettaNetPipRoleType = original.RosettaNetPipRoleTypeEmployee
+ RosettaNetPipRoleTypeFunctional RosettaNetPipRoleType = original.RosettaNetPipRoleTypeFunctional
+ RosettaNetPipRoleTypeNotSpecified RosettaNetPipRoleType = original.RosettaNetPipRoleTypeNotSpecified
+ RosettaNetPipRoleTypeOrganizational RosettaNetPipRoleType = original.RosettaNetPipRoleTypeOrganizational
+)
+
+type RosettaNetResponseType = original.RosettaNetResponseType
+
+const (
+ RosettaNetResponseTypeAsync RosettaNetResponseType = original.RosettaNetResponseTypeAsync
+ RosettaNetResponseTypeNotSpecified RosettaNetResponseType = original.RosettaNetResponseTypeNotSpecified
+ RosettaNetResponseTypeSync RosettaNetResponseType = original.RosettaNetResponseTypeSync
+)
+
type SchemaType = original.SchemaType
const (
@@ -468,6 +523,11 @@ type IntegrationAccountPartnerListResult = original.IntegrationAccountPartnerLis
type IntegrationAccountPartnerListResultIterator = original.IntegrationAccountPartnerListResultIterator
type IntegrationAccountPartnerListResultPage = original.IntegrationAccountPartnerListResultPage
type IntegrationAccountPartnerProperties = original.IntegrationAccountPartnerProperties
+type IntegrationAccountRosettaNetProcessConfiguration = original.IntegrationAccountRosettaNetProcessConfiguration
+type IntegrationAccountRosettaNetProcessConfigurationListResult = original.IntegrationAccountRosettaNetProcessConfigurationListResult
+type IntegrationAccountRosettaNetProcessConfigurationListResultIterator = original.IntegrationAccountRosettaNetProcessConfigurationListResultIterator
+type IntegrationAccountRosettaNetProcessConfigurationListResultPage = original.IntegrationAccountRosettaNetProcessConfigurationListResultPage
+type IntegrationAccountRosettaNetProcessConfigurationProperties = original.IntegrationAccountRosettaNetProcessConfigurationProperties
type IntegrationAccountSchema = original.IntegrationAccountSchema
type IntegrationAccountSchemaFilter = original.IntegrationAccountSchemaFilter
type IntegrationAccountSchemaListResult = original.IntegrationAccountSchemaListResult
@@ -514,6 +574,12 @@ type Resource = original.Resource
type ResourceReference = original.ResourceReference
type Response = original.Response
type RetryHistory = original.RetryHistory
+type RosettaNetPipAcknowledgmentOfReceiptSettings = original.RosettaNetPipAcknowledgmentOfReceiptSettings
+type RosettaNetPipActivityBehavior = original.RosettaNetPipActivityBehavior
+type RosettaNetPipActivitySettings = original.RosettaNetPipActivitySettings
+type RosettaNetPipBusinessDocument = original.RosettaNetPipBusinessDocument
+type RosettaNetPipRoleSettings = original.RosettaNetPipRoleSettings
+type RosettaNetProcessConfigurationsClient = original.RosettaNetProcessConfigurationsClient
type RunActionCorrelation = original.RunActionCorrelation
type RunCorrelation = original.RunCorrelation
type SchemasClient = original.SchemasClient
@@ -653,6 +719,12 @@ func NewIntegrationAccountPartnerListResultIterator(page IntegrationAccountPartn
func NewIntegrationAccountPartnerListResultPage(getNextPage func(context.Context, IntegrationAccountPartnerListResult) (IntegrationAccountPartnerListResult, error)) IntegrationAccountPartnerListResultPage {
return original.NewIntegrationAccountPartnerListResultPage(getNextPage)
}
+func NewIntegrationAccountRosettaNetProcessConfigurationListResultIterator(page IntegrationAccountRosettaNetProcessConfigurationListResultPage) IntegrationAccountRosettaNetProcessConfigurationListResultIterator {
+ return original.NewIntegrationAccountRosettaNetProcessConfigurationListResultIterator(page)
+}
+func NewIntegrationAccountRosettaNetProcessConfigurationListResultPage(getNextPage func(context.Context, IntegrationAccountRosettaNetProcessConfigurationListResult) (IntegrationAccountRosettaNetProcessConfigurationListResult, error)) IntegrationAccountRosettaNetProcessConfigurationListResultPage {
+ return original.NewIntegrationAccountRosettaNetProcessConfigurationListResultPage(getNextPage)
+}
func NewIntegrationAccountSchemaListResultIterator(page IntegrationAccountSchemaListResultPage) IntegrationAccountSchemaListResultIterator {
return original.NewIntegrationAccountSchemaListResultIterator(page)
}
@@ -695,6 +767,12 @@ func NewRequestHistoryListResultIterator(page RequestHistoryListResultPage) Requ
func NewRequestHistoryListResultPage(getNextPage func(context.Context, RequestHistoryListResult) (RequestHistoryListResult, error)) RequestHistoryListResultPage {
return original.NewRequestHistoryListResultPage(getNextPage)
}
+func NewRosettaNetProcessConfigurationsClient(subscriptionID string) RosettaNetProcessConfigurationsClient {
+ return original.NewRosettaNetProcessConfigurationsClient(subscriptionID)
+}
+func NewRosettaNetProcessConfigurationsClientWithBaseURI(baseURI string, subscriptionID string) RosettaNetProcessConfigurationsClient {
+ return original.NewRosettaNetProcessConfigurationsClientWithBaseURI(baseURI, subscriptionID)
+}
func NewSchemasClient(subscriptionID string) SchemasClient {
return original.NewSchemasClient(subscriptionID)
}
@@ -848,6 +926,9 @@ func PossibleKeyTypeValues() []KeyType {
func PossibleMapTypeValues() []MapType {
return original.PossibleMapTypeValues()
}
+func PossibleMessageEncodingValues() []MessageEncoding {
+ return original.PossibleMessageEncodingValues()
+}
func PossibleMessageFilterTypeValues() []MessageFilterType {
return original.PossibleMessageFilterTypeValues()
}
@@ -860,6 +941,21 @@ func PossiblePartnerTypeValues() []PartnerType {
func PossibleRecurrenceFrequencyValues() []RecurrenceFrequency {
return original.PossibleRecurrenceFrequencyValues()
}
+func PossibleRosettaNetActionTypeValues() []RosettaNetActionType {
+ return original.PossibleRosettaNetActionTypeValues()
+}
+func PossibleRosettaNetPipActivityTypeValues() []RosettaNetPipActivityType {
+ return original.PossibleRosettaNetPipActivityTypeValues()
+}
+func PossibleRosettaNetPipConfidentialityScopeValues() []RosettaNetPipConfidentialityScope {
+ return original.PossibleRosettaNetPipConfidentialityScopeValues()
+}
+func PossibleRosettaNetPipRoleTypeValues() []RosettaNetPipRoleType {
+ return original.PossibleRosettaNetPipRoleTypeValues()
+}
+func PossibleRosettaNetResponseTypeValues() []RosettaNetResponseType {
+ return original.PossibleRosettaNetResponseTypeValues()
+}
func PossibleSchemaTypeValues() []SchemaType {
return original.PossibleSchemaTypeValues()
}
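The logic profile adds the RosettaNet process configuration client alongside the new RosettaNet enums. A minimal construction sketch using only the constructor and helper aliased above; the subscription ID is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/logic/mgmt/logic"
)

func main() {
	client := logic.NewRosettaNetProcessConfigurationsClient("00000000-0000-0000-0000-000000000000")
	fmt.Printf("%T\n", client)

	// The new enums are regular string-backed constants.
	fmt.Println(logic.PossibleRosettaNetActionTypeValues())
}
```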
diff --git a/profiles/preview/machinelearningservices/mgmt/machinelearningservices/machinelearningservicesapi/models.go b/profiles/preview/machinelearningservices/mgmt/machinelearningservices/machinelearningservicesapi/models.go
index ec13e27528ef..0d80dd1aa71c 100644
--- a/profiles/preview/machinelearningservices/mgmt/machinelearningservices/machinelearningservicesapi/models.go
+++ b/profiles/preview/machinelearningservices/mgmt/machinelearningservices/machinelearningservicesapi/models.go
@@ -19,7 +19,7 @@
package machinelearningservicesapi
-import original "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-05-01/machinelearningservices/machinelearningservicesapi"
+import original "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/machinelearningservicesapi"
type MachineLearningComputeClientAPI = original.MachineLearningComputeClientAPI
type OperationsClientAPI = original.OperationsClientAPI
diff --git a/profiles/preview/machinelearningservices/mgmt/machinelearningservices/models.go b/profiles/preview/machinelearningservices/mgmt/machinelearningservices/models.go
index 72867cd33d1c..de8a0ceee1f8 100644
--- a/profiles/preview/machinelearningservices/mgmt/machinelearningservices/models.go
+++ b/profiles/preview/machinelearningservices/mgmt/machinelearningservices/models.go
@@ -22,7 +22,7 @@ package machinelearningservices
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-05-01/machinelearningservices"
+ original "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices"
)
const (
@@ -77,6 +77,17 @@ const (
ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine ComputeTypeBasicComputeSecrets = original.ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine
)
+type NodeState = original.NodeState
+
+const (
+ Idle NodeState = original.Idle
+ Leaving NodeState = original.Leaving
+ Preempted NodeState = original.Preempted
+ Preparing NodeState = original.Preparing
+ Running NodeState = original.Running
+ Unusable NodeState = original.Unusable
+)
+
type ProvisioningState = original.ProvisioningState
const (
@@ -89,6 +100,14 @@ const (
Updating ProvisioningState = original.Updating
)
+type RemoteLoginPortPublicAccess = original.RemoteLoginPortPublicAccess
+
+const (
+ Disabled RemoteLoginPortPublicAccess = original.Disabled
+ Enabled RemoteLoginPortPublicAccess = original.Enabled
+ NotSpecified RemoteLoginPortPublicAccess = original.NotSpecified
+)
+
type ResourceIdentityType = original.ResourceIdentityType
const (
@@ -98,8 +117,8 @@ const (
type Status = original.Status
const (
- Disabled Status = original.Disabled
- Enabled Status = original.Enabled
+ StatusDisabled Status = original.StatusDisabled
+ StatusEnabled Status = original.StatusEnabled
)
type UnderlyingResourceAction = original.UnderlyingResourceAction
@@ -266,9 +285,15 @@ func PossibleComputeTypeBasicComputeValues() []ComputeTypeBasicCompute {
func PossibleComputeTypeValues() []ComputeType {
return original.PossibleComputeTypeValues()
}
+func PossibleNodeStateValues() []NodeState {
+ return original.PossibleNodeStateValues()
+}
func PossibleProvisioningStateValues() []ProvisioningState {
return original.PossibleProvisioningStateValues()
}
+func PossibleRemoteLoginPortPublicAccessValues() []RemoteLoginPortPublicAccess {
+ return original.PossibleRemoteLoginPortPublicAccessValues()
+}
func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
return original.PossibleResourceIdentityTypeValues()
}
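In the 2019-06-01 machinelearningservices profile, the bare `Enabled`/`Disabled` names move to the new `RemoteLoginPortPublicAccess` enum, and the `Status` constants gain a prefix. A minimal migration sketch using only the constants shown above:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/machinelearningservices/mgmt/machinelearningservices"
)

func main() {
	// Previously: machinelearningservices.Enabled / machinelearningservices.Disabled (Status).
	status := machinelearningservices.StatusEnabled

	// The unprefixed names now belong to RemoteLoginPortPublicAccess.
	access := machinelearningservices.Enabled

	fmt.Println(status, access, machinelearningservices.PossibleNodeStateValues())
}
```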
diff --git a/profiles/preview/network/mgmt/network/models.go b/profiles/preview/network/mgmt/network/models.go
index b31bdeafe082..58b973bfc48e 100644
--- a/profiles/preview/network/mgmt/network/models.go
+++ b/profiles/preview/network/mgmt/network/models.go
@@ -422,6 +422,37 @@ const (
QinQ ExpressRoutePortsEncapsulation = original.QinQ
)
+type FirewallPolicyFilterRuleActionType = original.FirewallPolicyFilterRuleActionType
+
+const (
+ FirewallPolicyFilterRuleActionTypeAlert FirewallPolicyFilterRuleActionType = original.FirewallPolicyFilterRuleActionTypeAlert
+ FirewallPolicyFilterRuleActionTypeAllow FirewallPolicyFilterRuleActionType = original.FirewallPolicyFilterRuleActionTypeAllow
+ FirewallPolicyFilterRuleActionTypeDeny FirewallPolicyFilterRuleActionType = original.FirewallPolicyFilterRuleActionTypeDeny
+)
+
+type FirewallPolicyNatRuleActionType = original.FirewallPolicyNatRuleActionType
+
+const (
+ DNAT FirewallPolicyNatRuleActionType = original.DNAT
+ SNAT FirewallPolicyNatRuleActionType = original.SNAT
+)
+
+type FirewallPolicyRuleConditionApplicationProtocolType = original.FirewallPolicyRuleConditionApplicationProtocolType
+
+const (
+ FirewallPolicyRuleConditionApplicationProtocolTypeHTTP FirewallPolicyRuleConditionApplicationProtocolType = original.FirewallPolicyRuleConditionApplicationProtocolTypeHTTP
+ FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS FirewallPolicyRuleConditionApplicationProtocolType = original.FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS
+)
+
+type FirewallPolicyRuleConditionNetworkProtocol = original.FirewallPolicyRuleConditionNetworkProtocol
+
+const (
+ FirewallPolicyRuleConditionNetworkProtocolAny FirewallPolicyRuleConditionNetworkProtocol = original.FirewallPolicyRuleConditionNetworkProtocolAny
+ FirewallPolicyRuleConditionNetworkProtocolICMP FirewallPolicyRuleConditionNetworkProtocol = original.FirewallPolicyRuleConditionNetworkProtocolICMP
+ FirewallPolicyRuleConditionNetworkProtocolTCP FirewallPolicyRuleConditionNetworkProtocol = original.FirewallPolicyRuleConditionNetworkProtocolTCP
+ FirewallPolicyRuleConditionNetworkProtocolUDP FirewallPolicyRuleConditionNetworkProtocol = original.FirewallPolicyRuleConditionNetworkProtocolUDP
+)
+
type FlowLogFormatType = original.FlowLogFormatType
const (
@@ -698,6 +729,22 @@ const (
RouteNextHopTypeVnetLocal RouteNextHopType = original.RouteNextHopTypeVnetLocal
)
+type RuleConditionType = original.RuleConditionType
+
+const (
+ RuleConditionTypeApplicationRuleCondition RuleConditionType = original.RuleConditionTypeApplicationRuleCondition
+ RuleConditionTypeFirewallPolicyRuleCondition RuleConditionType = original.RuleConditionTypeFirewallPolicyRuleCondition
+ RuleConditionTypeNetworkRuleCondition RuleConditionType = original.RuleConditionTypeNetworkRuleCondition
+)
+
+type RuleType = original.RuleType
+
+const (
+ RuleTypeFirewallPolicyFilterRule RuleType = original.RuleTypeFirewallPolicyFilterRule
+ RuleTypeFirewallPolicyNatRule RuleType = original.RuleTypeFirewallPolicyNatRule
+ RuleTypeFirewallPolicyRule RuleType = original.RuleTypeFirewallPolicyRule
+)
+
type SecurityRuleAccess = original.SecurityRuleAccess
const (
@@ -1032,6 +1079,7 @@ type ApplicationGatewaysDeleteFuture = original.ApplicationGatewaysDeleteFuture
type ApplicationGatewaysStartFuture = original.ApplicationGatewaysStartFuture
type ApplicationGatewaysStopFuture = original.ApplicationGatewaysStopFuture
type ApplicationGatewaysUpdateTagsFuture = original.ApplicationGatewaysUpdateTagsFuture
+type ApplicationRuleCondition = original.ApplicationRuleCondition
type ApplicationSecurityGroup = original.ApplicationSecurityGroup
type ApplicationSecurityGroupListResult = original.ApplicationSecurityGroupListResult
type ApplicationSecurityGroupListResultIterator = original.ApplicationSecurityGroupListResultIterator
@@ -1092,6 +1140,7 @@ type AzureFirewallNetworkRule = original.AzureFirewallNetworkRule
type AzureFirewallNetworkRuleCollection = original.AzureFirewallNetworkRuleCollection
type AzureFirewallNetworkRuleCollectionPropertiesFormat = original.AzureFirewallNetworkRuleCollectionPropertiesFormat
type AzureFirewallPropertiesFormat = original.AzureFirewallPropertiesFormat
+type AzureFirewallPublicIPAddress = original.AzureFirewallPublicIPAddress
type AzureFirewallRCAction = original.AzureFirewallRCAction
type AzureFirewallsClient = original.AzureFirewallsClient
type AzureFirewallsCreateOrUpdateFuture = original.AzureFirewallsCreateOrUpdateFuture
@@ -1105,6 +1154,8 @@ type BGPCommunity = original.BGPCommunity
type BackendAddressPool = original.BackendAddressPool
type BackendAddressPoolPropertiesFormat = original.BackendAddressPoolPropertiesFormat
type BaseClient = original.BaseClient
+type BasicFirewallPolicyRule = original.BasicFirewallPolicyRule
+type BasicFirewallPolicyRuleCondition = original.BasicFirewallPolicyRuleCondition
type BastionHost = original.BastionHost
type BastionHostIPConfiguration = original.BastionHostIPConfiguration
type BastionHostIPConfigurationPropertiesFormat = original.BastionHostIPConfigurationPropertiesFormat
@@ -1306,6 +1357,29 @@ type ExpressRouteServiceProviderListResultIterator = original.ExpressRouteServic
type ExpressRouteServiceProviderListResultPage = original.ExpressRouteServiceProviderListResultPage
type ExpressRouteServiceProviderPropertiesFormat = original.ExpressRouteServiceProviderPropertiesFormat
type ExpressRouteServiceProvidersClient = original.ExpressRouteServiceProvidersClient
+type FirewallPoliciesClient = original.FirewallPoliciesClient
+type FirewallPoliciesCreateOrUpdateFuture = original.FirewallPoliciesCreateOrUpdateFuture
+type FirewallPoliciesDeleteFuture = original.FirewallPoliciesDeleteFuture
+type FirewallPolicy = original.FirewallPolicy
+type FirewallPolicyFilterRule = original.FirewallPolicyFilterRule
+type FirewallPolicyFilterRuleAction = original.FirewallPolicyFilterRuleAction
+type FirewallPolicyListResult = original.FirewallPolicyListResult
+type FirewallPolicyListResultIterator = original.FirewallPolicyListResultIterator
+type FirewallPolicyListResultPage = original.FirewallPolicyListResultPage
+type FirewallPolicyNatRule = original.FirewallPolicyNatRule
+type FirewallPolicyNatRuleAction = original.FirewallPolicyNatRuleAction
+type FirewallPolicyPropertiesFormat = original.FirewallPolicyPropertiesFormat
+type FirewallPolicyRule = original.FirewallPolicyRule
+type FirewallPolicyRuleCondition = original.FirewallPolicyRuleCondition
+type FirewallPolicyRuleConditionApplicationProtocol = original.FirewallPolicyRuleConditionApplicationProtocol
+type FirewallPolicyRuleGroup = original.FirewallPolicyRuleGroup
+type FirewallPolicyRuleGroupListResult = original.FirewallPolicyRuleGroupListResult
+type FirewallPolicyRuleGroupListResultIterator = original.FirewallPolicyRuleGroupListResultIterator
+type FirewallPolicyRuleGroupListResultPage = original.FirewallPolicyRuleGroupListResultPage
+type FirewallPolicyRuleGroupProperties = original.FirewallPolicyRuleGroupProperties
+type FirewallPolicyRuleGroupsClient = original.FirewallPolicyRuleGroupsClient
+type FirewallPolicyRuleGroupsCreateOrUpdateFuture = original.FirewallPolicyRuleGroupsCreateOrUpdateFuture
+type FirewallPolicyRuleGroupsDeleteFuture = original.FirewallPolicyRuleGroupsDeleteFuture
type FlowLogFormatParameters = original.FlowLogFormatParameters
type FlowLogInformation = original.FlowLogInformation
type FlowLogProperties = original.FlowLogProperties
@@ -1317,6 +1391,7 @@ type GatewayRouteListResult = original.GatewayRouteListResult
type GetVpnSitesConfigurationRequest = original.GetVpnSitesConfigurationRequest
type HTTPConfiguration = original.HTTPConfiguration
type HTTPHeader = original.HTTPHeader
+type HubIPAddresses = original.HubIPAddresses
type HubVirtualNetworkConnection = original.HubVirtualNetworkConnection
type HubVirtualNetworkConnectionProperties = original.HubVirtualNetworkConnectionProperties
type HubVirtualNetworkConnectionsClient = original.HubVirtualNetworkConnectionsClient
@@ -1620,6 +1695,7 @@ type RouteTablesUpdateTagsFuture = original.RouteTablesUpdateTagsFuture
type RoutesClient = original.RoutesClient
type RoutesCreateOrUpdateFuture = original.RoutesCreateOrUpdateFuture
type RoutesDeleteFuture = original.RoutesDeleteFuture
+type RuleCondition = original.RuleCondition
type SecurityGroup = original.SecurityGroup
type SecurityGroupListResult = original.SecurityGroupListResult
type SecurityGroupListResultIterator = original.SecurityGroupListResultIterator
@@ -1682,6 +1758,7 @@ type SubnetsClient = original.SubnetsClient
type SubnetsCreateOrUpdateFuture = original.SubnetsCreateOrUpdateFuture
type SubnetsDeleteFuture = original.SubnetsDeleteFuture
type SubnetsPrepareNetworkPoliciesFuture = original.SubnetsPrepareNetworkPoliciesFuture
+type SubnetsUnprepareNetworkPoliciesFuture = original.SubnetsUnprepareNetworkPoliciesFuture
type TagsObject = original.TagsObject
type Topology = original.Topology
type TopologyAssociation = original.TopologyAssociation
@@ -1695,6 +1772,7 @@ type TroubleshootingProperties = original.TroubleshootingProperties
type TroubleshootingRecommendedActions = original.TroubleshootingRecommendedActions
type TroubleshootingResult = original.TroubleshootingResult
type TunnelConnectionHealth = original.TunnelConnectionHealth
+type UnprepareNetworkPoliciesRequest = original.UnprepareNetworkPoliciesRequest
type Usage = original.Usage
type UsageName = original.UsageName
type UsagesClient = original.UsagesClient
@@ -2149,6 +2227,30 @@ func NewExpressRouteServiceProvidersClient(subscriptionID string) ExpressRouteSe
func NewExpressRouteServiceProvidersClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteServiceProvidersClient {
return original.NewExpressRouteServiceProvidersClientWithBaseURI(baseURI, subscriptionID)
}
+func NewFirewallPoliciesClient(subscriptionID string) FirewallPoliciesClient {
+ return original.NewFirewallPoliciesClient(subscriptionID)
+}
+func NewFirewallPoliciesClientWithBaseURI(baseURI string, subscriptionID string) FirewallPoliciesClient {
+ return original.NewFirewallPoliciesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewFirewallPolicyListResultIterator(page FirewallPolicyListResultPage) FirewallPolicyListResultIterator {
+ return original.NewFirewallPolicyListResultIterator(page)
+}
+func NewFirewallPolicyListResultPage(getNextPage func(context.Context, FirewallPolicyListResult) (FirewallPolicyListResult, error)) FirewallPolicyListResultPage {
+ return original.NewFirewallPolicyListResultPage(getNextPage)
+}
+func NewFirewallPolicyRuleGroupListResultIterator(page FirewallPolicyRuleGroupListResultPage) FirewallPolicyRuleGroupListResultIterator {
+ return original.NewFirewallPolicyRuleGroupListResultIterator(page)
+}
+func NewFirewallPolicyRuleGroupListResultPage(getNextPage func(context.Context, FirewallPolicyRuleGroupListResult) (FirewallPolicyRuleGroupListResult, error)) FirewallPolicyRuleGroupListResultPage {
+ return original.NewFirewallPolicyRuleGroupListResultPage(getNextPage)
+}
+func NewFirewallPolicyRuleGroupsClient(subscriptionID string) FirewallPolicyRuleGroupsClient {
+ return original.NewFirewallPolicyRuleGroupsClient(subscriptionID)
+}
+func NewFirewallPolicyRuleGroupsClientWithBaseURI(baseURI string, subscriptionID string) FirewallPolicyRuleGroupsClient {
+ return original.NewFirewallPolicyRuleGroupsClientWithBaseURI(baseURI, subscriptionID)
+}
func NewHubVirtualNetworkConnectionsClient(subscriptionID string) HubVirtualNetworkConnectionsClient {
return original.NewHubVirtualNetworkConnectionsClient(subscriptionID)
}
@@ -2902,6 +3004,18 @@ func PossibleExpressRoutePeeringTypeValues() []ExpressRoutePeeringType {
func PossibleExpressRoutePortsEncapsulationValues() []ExpressRoutePortsEncapsulation {
return original.PossibleExpressRoutePortsEncapsulationValues()
}
+func PossibleFirewallPolicyFilterRuleActionTypeValues() []FirewallPolicyFilterRuleActionType {
+ return original.PossibleFirewallPolicyFilterRuleActionTypeValues()
+}
+func PossibleFirewallPolicyNatRuleActionTypeValues() []FirewallPolicyNatRuleActionType {
+ return original.PossibleFirewallPolicyNatRuleActionTypeValues()
+}
+func PossibleFirewallPolicyRuleConditionApplicationProtocolTypeValues() []FirewallPolicyRuleConditionApplicationProtocolType {
+ return original.PossibleFirewallPolicyRuleConditionApplicationProtocolTypeValues()
+}
+func PossibleFirewallPolicyRuleConditionNetworkProtocolValues() []FirewallPolicyRuleConditionNetworkProtocol {
+ return original.PossibleFirewallPolicyRuleConditionNetworkProtocolValues()
+}
func PossibleFlowLogFormatTypeValues() []FlowLogFormatType {
return original.PossibleFlowLogFormatTypeValues()
}
@@ -2995,6 +3109,12 @@ func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
func PossibleRouteNextHopTypeValues() []RouteNextHopType {
return original.PossibleRouteNextHopTypeValues()
}
+func PossibleRuleConditionTypeValues() []RuleConditionType {
+ return original.PossibleRuleConditionTypeValues()
+}
+func PossibleRuleTypeValues() []RuleType {
+ return original.PossibleRuleTypeValues()
+}
func PossibleSecurityRuleAccessValues() []SecurityRuleAccess {
return original.PossibleSecurityRuleAccessValues()
}
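
For reference, a minimal usage sketch of the firewall-policy surface added to the preview network profile above. Only the constructors and enum helpers shown in this diff are taken as given; the import path follows the profile layout used by these files, the subscription ID is a placeholder, and the usual go-autorest authorizer wiring is assumed but not shown.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/network/mgmt/network"
)

func main() {
	subscriptionID := "<subscription id>" // placeholder

	// Constructors added in this release.
	policies := network.NewFirewallPoliciesClient(subscriptionID)
	ruleGroups := network.NewFirewallPolicyRuleGroupsClient(subscriptionID)

	// An autorest Authorizer would normally be assigned to each client
	// before calling any operations; that wiring is not shown in this diff.
	_, _ = policies, ruleGroups

	// Enum helpers added alongside the firewall-policy types.
	fmt.Println(network.PossibleFirewallPolicyFilterRuleActionTypeValues())
	fmt.Println(network.PossibleRuleConditionTypeValues())
}
```
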
diff --git a/profiles/preview/network/mgmt/network/networkapi/models.go b/profiles/preview/network/mgmt/network/networkapi/models.go
index b6dc4a0e532b..13c640036a2a 100644
--- a/profiles/preview/network/mgmt/network/networkapi/models.go
+++ b/profiles/preview/network/mgmt/network/networkapi/models.go
@@ -48,6 +48,8 @@ type ExpressRouteLinksClientAPI = original.ExpressRouteLinksClientAPI
type ExpressRoutePortsClientAPI = original.ExpressRoutePortsClientAPI
type ExpressRoutePortsLocationsClientAPI = original.ExpressRoutePortsLocationsClientAPI
type ExpressRouteServiceProvidersClientAPI = original.ExpressRouteServiceProvidersClientAPI
+type FirewallPoliciesClientAPI = original.FirewallPoliciesClientAPI
+type FirewallPolicyRuleGroupsClientAPI = original.FirewallPolicyRuleGroupsClientAPI
type HubVirtualNetworkConnectionsClientAPI = original.HubVirtualNetworkConnectionsClientAPI
type InboundNatRulesClientAPI = original.InboundNatRulesClientAPI
type InterfaceIPConfigurationsClientAPI = original.InterfaceIPConfigurationsClientAPI
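
The networkapi package now also aliases interfaces for the new clients. A brief hypothetical sketch of how such an interface can be accepted instead of the concrete client to ease mocking; the helper function is illustrative only, and it assumes, as with the other generated clients, that the concrete client satisfies its ...ClientAPI interface.

```go
package main

import (
	"github.com/Azure/azure-sdk-for-go/profiles/preview/network/mgmt/network"
	"github.com/Azure/azure-sdk-for-go/profiles/preview/network/mgmt/network/networkapi"
)

// reconcileFirewallPolicies is a hypothetical helper: it depends on the
// FirewallPoliciesClientAPI interface rather than the concrete client, so a
// fake implementation can be substituted in tests.
func reconcileFirewallPolicies(client networkapi.FirewallPoliciesClientAPI) {
	_ = client // real code would invoke the client's operations here
}

func main() {
	reconcileFirewallPolicies(network.NewFirewallPoliciesClient("<subscription id>"))
}
```
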
diff --git a/profiles/preview/preview/billing/mgmt/billing/billingapi/models.go b/profiles/preview/preview/billing/mgmt/billing/billingapi/models.go
index 7d015cc8d955..1bd9da01c6e7 100644
--- a/profiles/preview/preview/billing/mgmt/billing/billingapi/models.go
+++ b/profiles/preview/preview/billing/mgmt/billing/billingapi/models.go
@@ -25,6 +25,7 @@ type AccountsClientAPI = original.AccountsClientAPI
type AddressesClientAPI = original.AddressesClientAPI
type AgreementsClientAPI = original.AgreementsClientAPI
type AvailableBalancesClientAPI = original.AvailableBalancesClientAPI
+type CustomersClientAPI = original.CustomersClientAPI
type DepartmentsClientAPI = original.DepartmentsClientAPI
type EnrollmentAccountsClientAPI = original.EnrollmentAccountsClientAPI
type InvoiceSectionsClientAPI = original.InvoiceSectionsClientAPI
diff --git a/profiles/preview/preview/billing/mgmt/billing/models.go b/profiles/preview/preview/billing/mgmt/billing/models.go
index 1c7f3a337240..2443bbc6ff68 100644
--- a/profiles/preview/preview/billing/mgmt/billing/models.go
+++ b/profiles/preview/preview/billing/mgmt/billing/models.go
@@ -194,6 +194,8 @@ type AcceptTransferProperties = original.AcceptTransferProperties
type AcceptTransferRequest = original.AcceptTransferRequest
type Account = original.Account
type AccountListResult = original.AccountListResult
+type AccountListResultIterator = original.AccountListResultIterator
+type AccountListResultPage = original.AccountListResultPage
type AccountProperties = original.AccountProperties
type AccountUpdateProperties = original.AccountUpdateProperties
type AccountsClient = original.AccountsClient
@@ -202,6 +204,8 @@ type Address = original.Address
type AddressesClient = original.AddressesClient
type Agreement = original.Agreement
type AgreementListResult = original.AgreementListResult
+type AgreementListResultIterator = original.AgreementListResultIterator
+type AgreementListResultPage = original.AgreementListResultPage
type AgreementProperties = original.AgreementProperties
type AgreementsClient = original.AgreementsClient
type Amount = original.Amount
@@ -209,8 +213,16 @@ type AvailableBalance = original.AvailableBalance
type AvailableBalanceProperties = original.AvailableBalanceProperties
type AvailableBalancesClient = original.AvailableBalancesClient
type BaseClient = original.BaseClient
+type Customer = original.Customer
+type CustomerListResult = original.CustomerListResult
+type CustomerListResultIterator = original.CustomerListResultIterator
+type CustomerListResultPage = original.CustomerListResultPage
+type CustomerProperties = original.CustomerProperties
+type CustomersClient = original.CustomersClient
type Department = original.Department
type DepartmentListResult = original.DepartmentListResult
+type DepartmentListResultIterator = original.DepartmentListResultIterator
+type DepartmentListResultPage = original.DepartmentListResultPage
type DepartmentProperties = original.DepartmentProperties
type DepartmentsClient = original.DepartmentsClient
type DetailedTransferStatus = original.DetailedTransferStatus
@@ -221,6 +233,8 @@ type Enrollment = original.Enrollment
type EnrollmentAccount = original.EnrollmentAccount
type EnrollmentAccountContext = original.EnrollmentAccountContext
type EnrollmentAccountListResult = original.EnrollmentAccountListResult
+type EnrollmentAccountListResultIterator = original.EnrollmentAccountListResultIterator
+type EnrollmentAccountListResultPage = original.EnrollmentAccountListResultPage
type EnrollmentAccountProperties = original.EnrollmentAccountProperties
type EnrollmentAccountsClient = original.EnrollmentAccountsClient
type EnrollmentPolicies = original.EnrollmentPolicies
@@ -230,9 +244,13 @@ type ErrorResponse = original.ErrorResponse
type InitiateTransferProperties = original.InitiateTransferProperties
type InitiateTransferRequest = original.InitiateTransferRequest
type InvoiceListResult = original.InvoiceListResult
+type InvoiceListResultIterator = original.InvoiceListResultIterator
+type InvoiceListResultPage = original.InvoiceListResultPage
type InvoiceSection = original.InvoiceSection
type InvoiceSectionCreationRequest = original.InvoiceSectionCreationRequest
type InvoiceSectionListResult = original.InvoiceSectionListResult
+type InvoiceSectionListResultIterator = original.InvoiceSectionListResultIterator
+type InvoiceSectionListResultPage = original.InvoiceSectionListResultPage
type InvoiceSectionProperties = original.InvoiceSectionProperties
type InvoiceSectionsClient = original.InvoiceSectionsClient
type InvoiceSectionsCreateFuture = original.InvoiceSectionsCreateFuture
@@ -277,6 +295,8 @@ type ProductsListResultPage = original.ProductsListResultPage
type Profile = original.Profile
type ProfileCreationParameters = original.ProfileCreationParameters
type ProfileListResult = original.ProfileListResult
+type ProfileListResultIterator = original.ProfileListResultIterator
+type ProfileListResultPage = original.ProfileListResultPage
type ProfileProperties = original.ProfileProperties
type ProfilesClient = original.ProfilesClient
type ProfilesCreateFuture = original.ProfilesCreateFuture
@@ -290,6 +310,7 @@ type RecipientTransferDetailsListResultIterator = original.RecipientTransferDeta
type RecipientTransferDetailsListResultPage = original.RecipientTransferDetailsListResultPage
type RecipientTransferProperties = original.RecipientTransferProperties
type RecipientTransfersClient = original.RecipientTransfersClient
+type Reseller = original.Reseller
type Resource = original.Resource
type RoleAssignment = original.RoleAssignment
type RoleAssignmentListResult = original.RoleAssignmentListResult
@@ -336,6 +357,12 @@ type ValidateSubscriptionTransferEligibilityResult = original.ValidateSubscripti
func New(subscriptionID string) BaseClient {
return original.New(subscriptionID)
}
+func NewAccountListResultIterator(page AccountListResultPage) AccountListResultIterator {
+ return original.NewAccountListResultIterator(page)
+}
+func NewAccountListResultPage(getNextPage func(context.Context, AccountListResult) (AccountListResult, error)) AccountListResultPage {
+ return original.NewAccountListResultPage(getNextPage)
+}
func NewAccountsClient(subscriptionID string) AccountsClient {
return original.NewAccountsClient(subscriptionID)
}
@@ -348,6 +375,12 @@ func NewAddressesClient(subscriptionID string) AddressesClient {
func NewAddressesClientWithBaseURI(baseURI string, subscriptionID string) AddressesClient {
return original.NewAddressesClientWithBaseURI(baseURI, subscriptionID)
}
+func NewAgreementListResultIterator(page AgreementListResultPage) AgreementListResultIterator {
+ return original.NewAgreementListResultIterator(page)
+}
+func NewAgreementListResultPage(getNextPage func(context.Context, AgreementListResult) (AgreementListResult, error)) AgreementListResultPage {
+ return original.NewAgreementListResultPage(getNextPage)
+}
func NewAgreementsClient(subscriptionID string) AgreementsClient {
return original.NewAgreementsClient(subscriptionID)
}
@@ -360,18 +393,54 @@ func NewAvailableBalancesClient(subscriptionID string) AvailableBalancesClient {
func NewAvailableBalancesClientWithBaseURI(baseURI string, subscriptionID string) AvailableBalancesClient {
return original.NewAvailableBalancesClientWithBaseURI(baseURI, subscriptionID)
}
+func NewCustomerListResultIterator(page CustomerListResultPage) CustomerListResultIterator {
+ return original.NewCustomerListResultIterator(page)
+}
+func NewCustomerListResultPage(getNextPage func(context.Context, CustomerListResult) (CustomerListResult, error)) CustomerListResultPage {
+ return original.NewCustomerListResultPage(getNextPage)
+}
+func NewCustomersClient(subscriptionID string) CustomersClient {
+ return original.NewCustomersClient(subscriptionID)
+}
+func NewCustomersClientWithBaseURI(baseURI string, subscriptionID string) CustomersClient {
+ return original.NewCustomersClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewDepartmentListResultIterator(page DepartmentListResultPage) DepartmentListResultIterator {
+ return original.NewDepartmentListResultIterator(page)
+}
+func NewDepartmentListResultPage(getNextPage func(context.Context, DepartmentListResult) (DepartmentListResult, error)) DepartmentListResultPage {
+ return original.NewDepartmentListResultPage(getNextPage)
+}
func NewDepartmentsClient(subscriptionID string) DepartmentsClient {
return original.NewDepartmentsClient(subscriptionID)
}
func NewDepartmentsClientWithBaseURI(baseURI string, subscriptionID string) DepartmentsClient {
return original.NewDepartmentsClientWithBaseURI(baseURI, subscriptionID)
}
+func NewEnrollmentAccountListResultIterator(page EnrollmentAccountListResultPage) EnrollmentAccountListResultIterator {
+ return original.NewEnrollmentAccountListResultIterator(page)
+}
+func NewEnrollmentAccountListResultPage(getNextPage func(context.Context, EnrollmentAccountListResult) (EnrollmentAccountListResult, error)) EnrollmentAccountListResultPage {
+ return original.NewEnrollmentAccountListResultPage(getNextPage)
+}
func NewEnrollmentAccountsClient(subscriptionID string) EnrollmentAccountsClient {
return original.NewEnrollmentAccountsClient(subscriptionID)
}
func NewEnrollmentAccountsClientWithBaseURI(baseURI string, subscriptionID string) EnrollmentAccountsClient {
return original.NewEnrollmentAccountsClientWithBaseURI(baseURI, subscriptionID)
}
+func NewInvoiceListResultIterator(page InvoiceListResultPage) InvoiceListResultIterator {
+ return original.NewInvoiceListResultIterator(page)
+}
+func NewInvoiceListResultPage(getNextPage func(context.Context, InvoiceListResult) (InvoiceListResult, error)) InvoiceListResultPage {
+ return original.NewInvoiceListResultPage(getNextPage)
+}
+func NewInvoiceSectionListResultIterator(page InvoiceSectionListResultPage) InvoiceSectionListResultIterator {
+ return original.NewInvoiceSectionListResultIterator(page)
+}
+func NewInvoiceSectionListResultPage(getNextPage func(context.Context, InvoiceSectionListResult) (InvoiceSectionListResult, error)) InvoiceSectionListResultPage {
+ return original.NewInvoiceSectionListResultPage(getNextPage)
+}
func NewInvoiceSectionsClient(subscriptionID string) InvoiceSectionsClient {
return original.NewInvoiceSectionsClient(subscriptionID)
}
@@ -444,6 +513,12 @@ func NewProductsListResultIterator(page ProductsListResultPage) ProductsListResu
func NewProductsListResultPage(getNextPage func(context.Context, ProductsListResult) (ProductsListResult, error)) ProductsListResultPage {
return original.NewProductsListResultPage(getNextPage)
}
+func NewProfileListResultIterator(page ProfileListResultPage) ProfileListResultIterator {
+ return original.NewProfileListResultIterator(page)
+}
+func NewProfileListResultPage(getNextPage func(context.Context, ProfileListResult) (ProfileListResult, error)) ProfileListResultPage {
+ return original.NewProfileListResultPage(getNextPage)
+}
func NewProfilesClient(subscriptionID string) ProfilesClient {
return original.NewProfilesClient(subscriptionID)
}
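
The billing profile gains a CustomersClient plus iterator/page types for several list results. A minimal sketch of the new constructors as declared in this diff; the stub getNextPage function exists only to illustrate the signature, and the subscription ID is a placeholder.

```go
package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/preview/billing/mgmt/billing"
)

func main() {
	// New client constructor added in this release.
	customers := billing.NewCustomersClient("<subscription id>")
	_ = customers

	// The new paging helpers take a getNextPage function; a no-op stub is
	// used here purely to show the shape of the call.
	page := billing.NewCustomerListResultPage(func(ctx context.Context, cur billing.CustomerListResult) (billing.CustomerListResult, error) {
		return billing.CustomerListResult{}, nil
	})
	_ = billing.NewCustomerListResultIterator(page)
}
```
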
diff --git a/profiles/preview/preview/cognitiveservices/inkrecognizer/models.go b/profiles/preview/preview/cognitiveservices/inkrecognizer/models.go
index a9dfcd2a29fc..eced44eb7b8c 100644
--- a/profiles/preview/preview/cognitiveservices/inkrecognizer/models.go
+++ b/profiles/preview/preview/cognitiveservices/inkrecognizer/models.go
@@ -36,8 +36,8 @@ const (
InkDrawing Category = original.InkDrawing
InkWord Category = original.InkWord
Line Category = original.Line
- ListItem Category = original.ListItem
Paragraph Category = original.Paragraph
+ Root Category = original.Root
Unknown Category = original.Unknown
WritingRegion Category = original.WritingRegion
)
@@ -53,11 +53,26 @@ type Container = original.Container
const (
ContainerLine Container = original.ContainerLine
- ContainerListItem Container = original.ContainerListItem
ContainerParagraph Container = original.ContainerParagraph
+ ContainerRoot Container = original.ContainerRoot
ContainerWritingRegion Container = original.ContainerWritingRegion
)
+type InputDevice = original.InputDevice
+
+const (
+ Armature InputDevice = original.Armature
+ ArticulatedArm InputDevice = original.ArticulatedArm
+ Digitizer InputDevice = original.Digitizer
+ LightPen InputDevice = original.LightPen
+ Pen InputDevice = original.Pen
+ StereoPlotter InputDevice = original.StereoPlotter
+ ThreedDigitizer InputDevice = original.ThreedDigitizer
+ TouchPad InputDevice = original.TouchPad
+ TouchScreen InputDevice = original.TouchScreen
+ WhiteBoard InputDevice = original.WhiteBoard
+)
+
type Kind = original.Kind
const (
@@ -125,14 +140,6 @@ const (
Mm Unit = original.Mm
)
-type Unit1 = original.Unit1
-
-const (
- Unit1Cm Unit1 = original.Unit1Cm
- Unit1In Unit1 = original.Unit1In
- Unit1Mm Unit1 = original.Unit1Mm
-)
-
type AlternatePatternItem = original.AlternatePatternItem
type AnalysisRequest = original.AnalysisRequest
type AnalysisResponse = original.AnalysisResponse
@@ -142,6 +149,8 @@ type DrawingAttributesPattern = original.DrawingAttributesPattern
type DrawingAttributesPatternColor = original.DrawingAttributesPatternColor
type ErrorModel = original.ErrorModel
type ErrorModelDetailsItem = original.ErrorModelDetailsItem
+type InkPoint = original.InkPoint
+type InkPointValueAttribute = original.InkPointValueAttribute
type PointDetailsPattern = original.PointDetailsPattern
type RecognitionUnitItem = original.RecognitionUnitItem
type RecognitionUnitItemBoundingRectangle = original.RecognitionUnitItemBoundingRectangle
@@ -168,6 +177,9 @@ func PossibleClassValues() []Class {
func PossibleContainerValues() []Container {
return original.PossibleContainerValues()
}
+func PossibleInputDeviceValues() []InputDevice {
+ return original.PossibleInputDeviceValues()
+}
func PossibleKindValues() []Kind {
return original.PossibleKindValues()
}
@@ -183,9 +195,6 @@ func PossibleShapeValues() []Shape {
func PossibleTipValues() []Tip {
return original.PossibleTipValues()
}
-func PossibleUnit1Values() []Unit1 {
- return original.PossibleUnit1Values()
-}
func PossibleUnitValues() []Unit {
return original.PossibleUnitValues()
}
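
A short sketch of the inkrecognizer enum changes above, using only identifiers present in this diff: the new InputDevice enum and its helper, and the Root Category value that is exposed now that ListItem is gone.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/preview/cognitiveservices/inkrecognizer"
)

func main() {
	// InputDevice is new in this release; the helper enumerates every value.
	for _, d := range inkrecognizer.PossibleInputDeviceValues() {
		fmt.Println(d)
	}

	// ListItem is no longer exposed as a Category; Root is among the values
	// available in this version of the profile.
	fmt.Println(inkrecognizer.Root, inkrecognizer.Pen)
}
```
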
diff --git a/profiles/preview/preview/containerregistry/runtime/containerregistry/containerregistryapi/models.go b/profiles/preview/preview/containerregistry/runtime/containerregistry/containerregistryapi/models.go
new file mode 100644
index 000000000000..737eef8e9563
--- /dev/null
+++ b/profiles/preview/preview/containerregistry/runtime/containerregistry/containerregistryapi/models.go
@@ -0,0 +1,24 @@
+// +build go1.9
+
+// Copyright 2019 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This code was auto-generated by:
+// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
+
+package containerregistryapi
+
+import original "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry/containerregistryapi"
+
+type BaseClientAPI = original.BaseClientAPI
diff --git a/profiles/preview/preview/containerregistry/runtime/containerregistry/models.go b/profiles/preview/preview/containerregistry/runtime/containerregistry/models.go
new file mode 100644
index 000000000000..df98f91b6ca2
--- /dev/null
+++ b/profiles/preview/preview/containerregistry/runtime/containerregistry/models.go
@@ -0,0 +1,65 @@
+// +build go1.9
+
+// Copyright 2019 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This code was auto-generated by:
+// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
+
+package containerregistry
+
+import original "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry"
+
+type AccessToken = original.AccessToken
+type AcrErrorInfo = original.AcrErrorInfo
+type AcrErrors = original.AcrErrors
+type AcrManifestAttributes = original.AcrManifestAttributes
+type AcrManifestAttributesBase = original.AcrManifestAttributesBase
+type AcrManifests = original.AcrManifests
+type AcrRepositoryTags = original.AcrRepositoryTags
+type AcrTagAttributes = original.AcrTagAttributes
+type AcrTagAttributesBase = original.AcrTagAttributesBase
+type BaseClient = original.BaseClient
+type ChangeableAttributes = original.ChangeableAttributes
+type DeletedRepository = original.DeletedRepository
+type FsLayer = original.FsLayer
+type History = original.History
+type ImageSignature = original.ImageSignature
+type JWK = original.JWK
+type JWKHeader = original.JWKHeader
+type Manifest = original.Manifest
+type ManifestAttributesManifest = original.ManifestAttributesManifest
+type ManifestAttributesManifestReferences = original.ManifestAttributesManifestReferences
+type ManifestChangeableAttributes = original.ManifestChangeableAttributes
+type RefreshToken = original.RefreshToken
+type Repositories = original.Repositories
+type RepositoryAttributes = original.RepositoryAttributes
+type RepositoryTags = original.RepositoryTags
+type SetObject = original.SetObject
+type TagAttributes = original.TagAttributes
+type TagAttributesTag = original.TagAttributesTag
+type V2Descriptor = original.V2Descriptor
+
+func New(loginURI string) BaseClient {
+ return original.New(loginURI)
+}
+func NewWithoutDefaults(loginURI string) BaseClient {
+ return original.NewWithoutDefaults(loginURI)
+}
+func UserAgent() string {
+ return original.UserAgent() + " profiles/preview"
+}
+func Version() string {
+ return original.Version()
+}
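
A minimal sketch of the new container registry runtime profile package above; the registry endpoint passed to New is a placeholder, and only the functions declared in this file are used.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/preview/containerregistry/runtime/containerregistry"
)

func main() {
	// The profile re-exports the data-plane client; the login URI below is a
	// placeholder registry endpoint.
	client := containerregistry.New("https://myregistry.azurecr.io")
	_ = client

	fmt.Println(containerregistry.UserAgent(), containerregistry.Version())
}
```
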
diff --git a/profiles/preview/preview/datamigration/mgmt/datamigration/models.go b/profiles/preview/preview/datamigration/mgmt/datamigration/models.go
index 549a17b74283..e854ea0ec7ff 100644
--- a/profiles/preview/preview/datamigration/mgmt/datamigration/models.go
+++ b/profiles/preview/preview/datamigration/mgmt/datamigration/models.go
@@ -84,11 +84,11 @@ type CommandType = original.CommandType
const (
CommandTypeCancel CommandType = original.CommandTypeCancel
+ CommandTypeCommandProperties CommandType = original.CommandTypeCommandProperties
CommandTypeFinish CommandType = original.CommandTypeFinish
CommandTypeMigrateSQLServerAzureDbSQLMiComplete CommandType = original.CommandTypeMigrateSQLServerAzureDbSQLMiComplete
CommandTypeMigrateSyncCompleteDatabase CommandType = original.CommandTypeMigrateSyncCompleteDatabase
CommandTypeRestart CommandType = original.CommandTypeRestart
- CommandTypeUnknown CommandType = original.CommandTypeUnknown
)
type DatabaseCompatLevel = original.DatabaseCompatLevel
@@ -610,7 +610,7 @@ const (
TaskTypeGetUserTablesSQL TaskType = original.TaskTypeGetUserTablesSQL
TaskTypeMigrateMongoDb TaskType = original.TaskTypeMigrateMongoDb
TaskTypeMigrateMySQLAzureDbForMySQLSync TaskType = original.TaskTypeMigrateMySQLAzureDbForMySQLSync
- TaskTypeMigrateOracleSQLSync TaskType = original.TaskTypeMigrateOracleSQLSync
+ TaskTypeMigrateOracleAzureDbForPostgreSQLSync TaskType = original.TaskTypeMigrateOracleAzureDbForPostgreSQLSync
TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync TaskType = original.TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync
TaskTypeMigrateSchemaSQLServerSQLDb TaskType = original.TaskTypeMigrateSchemaSQLServerSQLDb
TaskTypeMigrateSQLServerAzureSQLDbMI TaskType = original.TaskTypeMigrateSQLServerAzureSQLDbMI
@@ -618,10 +618,10 @@ const (
TaskTypeMigrateSQLServerAzureSQLDbSync TaskType = original.TaskTypeMigrateSQLServerAzureSQLDbSync
TaskTypeMigrateSQLServerSQLDb TaskType = original.TaskTypeMigrateSQLServerSQLDb
TaskTypeMigrateSsis TaskType = original.TaskTypeMigrateSsis
+ TaskTypeProjectTaskProperties TaskType = original.TaskTypeProjectTaskProperties
TaskTypeServiceCheckOCI TaskType = original.TaskTypeServiceCheckOCI
TaskTypeServiceInstallOCI TaskType = original.TaskTypeServiceInstallOCI
TaskTypeServiceUploadOCI TaskType = original.TaskTypeServiceUploadOCI
- TaskTypeUnknown TaskType = original.TaskTypeUnknown
TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI TaskType = original.TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI
TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS TaskType = original.TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS
TaskTypeValidateMigrationInputSQLServerSQLDbSync TaskType = original.TaskTypeValidateMigrationInputSQLServerSQLDbSync
@@ -632,13 +632,13 @@ const (
type Type = original.Type
const (
+ TypeConnectionInfo Type = original.TypeConnectionInfo
TypeMiSQLConnectionInfo Type = original.TypeMiSQLConnectionInfo
TypeMongoDbConnectionInfo Type = original.TypeMongoDbConnectionInfo
TypeMySQLConnectionInfo Type = original.TypeMySQLConnectionInfo
TypeOracleConnectionInfo Type = original.TypeOracleConnectionInfo
TypePostgreSQLConnectionInfo Type = original.TypePostgreSQLConnectionInfo
TypeSQLConnectionInfo Type = original.TypeSQLConnectionInfo
- TypeUnknown Type = original.TypeUnknown
)
type UpdateActionType = original.UpdateActionType
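
A brief illustrative sketch of the datamigration discriminator changes above: TaskTypeMigrateOracleSQLSync is renamed to TaskTypeMigrateOracleAzureDbForPostgreSQLSync, and the *Unknown constants are replaced by base-type discriminators. The helper function below is hypothetical and only references constants shown in this diff.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/preview/datamigration/mgmt/datamigration"
)

// describeTaskType is an illustrative helper over the updated constants.
func describeTaskType(t datamigration.TaskType) string {
	switch t {
	case datamigration.TaskTypeMigrateOracleAzureDbForPostgreSQLSync:
		return "Oracle to Azure DB for PostgreSQL sync migration"
	case datamigration.TaskTypeProjectTaskProperties:
		return "base project task properties"
	default:
		return "other task type"
	}
}

func main() {
	fmt.Println(describeTaskType(datamigration.TaskTypeMigrateOracleAzureDbForPostgreSQLSync))
}
```
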
diff --git a/profiles/preview/preview/frontdoor/mgmt/frontdoor/frontdoorapi/models.go b/profiles/preview/preview/frontdoor/mgmt/frontdoor/frontdoorapi/models.go
index 472442f7e262..b2d40a2d8b82 100644
--- a/profiles/preview/preview/frontdoor/mgmt/frontdoor/frontdoorapi/models.go
+++ b/profiles/preview/preview/frontdoor/mgmt/frontdoor/frontdoorapi/models.go
@@ -19,7 +19,7 @@
package frontdoorapi
-import original "github.com/Azure/azure-sdk-for-go/services/preview/frontdoor/mgmt/2019-04-01/frontdoor/frontdoorapi"
+import original "github.com/Azure/azure-sdk-for-go/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/frontdoorapi"
type BackendPoolsClientAPI = original.BackendPoolsClientAPI
type BaseClientAPI = original.BaseClientAPI
diff --git a/profiles/preview/preview/frontdoor/mgmt/frontdoor/models.go b/profiles/preview/preview/frontdoor/mgmt/frontdoor/models.go
index 81b0bf1af97b..38aa56ec23bf 100644
--- a/profiles/preview/preview/frontdoor/mgmt/frontdoor/models.go
+++ b/profiles/preview/preview/frontdoor/mgmt/frontdoor/models.go
@@ -22,7 +22,7 @@ package frontdoor
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/preview/frontdoor/mgmt/2019-04-01/frontdoor"
+ original "github.com/Azure/azure-sdk-for-go/services/preview/frontdoor/mgmt/2019-05-01/frontdoor"
)
const (
@@ -126,6 +126,20 @@ const (
MatchRequest ForwardingProtocol = original.MatchRequest
)
+type HealthProbeEnabled = original.HealthProbeEnabled
+
+const (
+ HealthProbeEnabledDisabled HealthProbeEnabled = original.HealthProbeEnabledDisabled
+ HealthProbeEnabledEnabled HealthProbeEnabled = original.HealthProbeEnabledEnabled
+)
+
+type HealthProbeMethod = original.HealthProbeMethod
+
+const (
+ GET HealthProbeMethod = original.GET
+ HEAD HealthProbeMethod = original.HEAD
+)
+
type ManagedRuleEnabledState = original.ManagedRuleEnabledState
const (
@@ -144,6 +158,14 @@ const (
RequestHeader MatchVariable = original.RequestHeader
RequestMethod MatchVariable = original.RequestMethod
RequestURI MatchVariable = original.RequestURI
+ SocketAddr MatchVariable = original.SocketAddr
+)
+
+type MinimumTLSVersion = original.MinimumTLSVersion
+
+const (
+ OneFullStopTwo MinimumTLSVersion = original.OneFullStopTwo
+ OneFullStopZero MinimumTLSVersion = original.OneFullStopZero
)
type NetworkOperationStatus = original.NetworkOperationStatus
@@ -274,12 +296,6 @@ const (
SessionAffinityEnabledStateEnabled SessionAffinityEnabledState = original.SessionAffinityEnabledStateEnabled
)
-type TLSProtocolType = original.TLSProtocolType
-
-const (
- ServerNameIndication TLSProtocolType = original.ServerNameIndication
-)
-
type TransformType = original.TransformType
const (
@@ -544,12 +560,21 @@ func PossibleEnforceCertificateNameCheckEnabledStateValues() []EnforceCertificat
func PossibleForwardingProtocolValues() []ForwardingProtocol {
return original.PossibleForwardingProtocolValues()
}
+func PossibleHealthProbeEnabledValues() []HealthProbeEnabled {
+ return original.PossibleHealthProbeEnabledValues()
+}
+func PossibleHealthProbeMethodValues() []HealthProbeMethod {
+ return original.PossibleHealthProbeMethodValues()
+}
func PossibleManagedRuleEnabledStateValues() []ManagedRuleEnabledState {
return original.PossibleManagedRuleEnabledStateValues()
}
func PossibleMatchVariableValues() []MatchVariable {
return original.PossibleMatchVariableValues()
}
+func PossibleMinimumTLSVersionValues() []MinimumTLSVersion {
+ return original.PossibleMinimumTLSVersionValues()
+}
func PossibleNetworkOperationStatusValues() []NetworkOperationStatus {
return original.PossibleNetworkOperationStatusValues()
}
@@ -595,9 +620,6 @@ func PossibleRuleTypeValues() []RuleType {
func PossibleSessionAffinityEnabledStateValues() []SessionAffinityEnabledState {
return original.PossibleSessionAffinityEnabledStateValues()
}
-func PossibleTLSProtocolTypeValues() []TLSProtocolType {
- return original.PossibleTLSProtocolTypeValues()
-}
func PossibleTransformTypeValues() []TransformType {
return original.PossibleTransformTypeValues()
}
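
A small sketch of the frontdoor enums introduced by the move to the 2019-05-01 API version, limited to constants and helpers shown in this diff.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/preview/frontdoor/mgmt/frontdoor"
)

func main() {
	// Enum helpers added in this release.
	fmt.Println(frontdoor.PossibleHealthProbeMethodValues())
	fmt.Println(frontdoor.PossibleHealthProbeEnabledValues())
	fmt.Println(frontdoor.PossibleMinimumTLSVersionValues())

	// GET/HEAD and the OneFullStop* constants are the new enum values;
	// TLSProtocolType is no longer exposed by this profile.
	_ = frontdoor.GET
	_ = frontdoor.OneFullStopTwo
}
```
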
diff --git a/profiles/preview/preview/hanaonazure/mgmt/hanaonazure/models.go b/profiles/preview/preview/hanaonazure/mgmt/hanaonazure/models.go
index e32f4baf2adc..9c3bafe16ebf 100644
--- a/profiles/preview/preview/hanaonazure/mgmt/hanaonazure/models.go
+++ b/profiles/preview/preview/hanaonazure/mgmt/hanaonazure/models.go
@@ -96,7 +96,6 @@ type HanaInstanceProperties = original.HanaInstanceProperties
type HanaInstancesClient = original.HanaInstancesClient
type HanaInstancesCreateFuture = original.HanaInstancesCreateFuture
type HanaInstancesDeleteFuture = original.HanaInstancesDeleteFuture
-type HanaInstancesEnableMonitoringFuture = original.HanaInstancesEnableMonitoringFuture
type HanaInstancesListResult = original.HanaInstancesListResult
type HanaInstancesListResultIterator = original.HanaInstancesListResultIterator
type HanaInstancesListResultPage = original.HanaInstancesListResultPage
diff --git a/profiles/preview/preview/healthcareapis/mgmt/healthcareapis/models.go b/profiles/preview/preview/healthcareapis/mgmt/healthcareapis/models.go
index 40e3bc255a2f..813c88d1695d 100644
--- a/profiles/preview/preview/healthcareapis/mgmt/healthcareapis/models.go
+++ b/profiles/preview/preview/healthcareapis/mgmt/healthcareapis/models.go
@@ -29,6 +29,14 @@ const (
DefaultBaseURI = original.DefaultBaseURI
)
+type Kind = original.Kind
+
+const (
+ Fhir Kind = original.Fhir
+ FhirR4 Kind = original.FhirR4
+ FhirStu3 Kind = original.FhirStu3
+)
+
type OperationResultStatus = original.OperationResultStatus
const (
@@ -126,6 +134,9 @@ func NewServicesDescriptionListResultPage(getNextPage func(context.Context, Serv
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
return original.NewWithBaseURI(baseURI, subscriptionID)
}
+func PossibleKindValues() []Kind {
+ return original.PossibleKindValues()
+}
func PossibleOperationResultStatusValues() []OperationResultStatus {
return original.PossibleOperationResultStatusValues()
}
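
A minimal sketch of the new healthcareapis Kind enum, using only the constants and helper declared above.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/preview/healthcareapis/mgmt/healthcareapis"
)

func main() {
	// Kind is new in this release and distinguishes the FHIR service flavours.
	for _, k := range healthcareapis.PossibleKindValues() {
		switch k {
		case healthcareapis.Fhir, healthcareapis.FhirR4, healthcareapis.FhirStu3:
			fmt.Println("FHIR kind:", k)
		}
	}
}
```
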
diff --git a/profiles/preview/preview/managednetwork/mgmt/managednetwork/managednetworkapi/models.go b/profiles/preview/preview/managednetwork/mgmt/managednetwork/managednetworkapi/models.go
new file mode 100644
index 000000000000..01732589b0cd
--- /dev/null
+++ b/profiles/preview/preview/managednetwork/mgmt/managednetwork/managednetworkapi/models.go
@@ -0,0 +1,28 @@
+// +build go1.9
+
+// Copyright 2019 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This code was auto-generated by:
+// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
+
+package managednetworkapi
+
+import original "github.com/Azure/azure-sdk-for-go/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/managednetworkapi"
+
+type GroupsClientAPI = original.GroupsClientAPI
+type ManagedNetworksClientAPI = original.ManagedNetworksClientAPI
+type OperationsClientAPI = original.OperationsClientAPI
+type PeeringPoliciesClientAPI = original.PeeringPoliciesClientAPI
+type ScopeAssignmentsClientAPI = original.ScopeAssignmentsClientAPI
diff --git a/profiles/preview/preview/managednetwork/mgmt/managednetwork/models.go b/profiles/preview/preview/managednetwork/mgmt/managednetwork/models.go
new file mode 100644
index 000000000000..d2b5de3e5177
--- /dev/null
+++ b/profiles/preview/preview/managednetwork/mgmt/managednetwork/models.go
@@ -0,0 +1,183 @@
+// +build go1.9
+
+// Copyright 2019 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This code was auto-generated by:
+// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
+
+package managednetwork
+
+import (
+ "context"
+
+ original "github.com/Azure/azure-sdk-for-go/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork"
+)
+
+const (
+ DefaultBaseURI = original.DefaultBaseURI
+)
+
+type Kind = original.Kind
+
+const (
+ Connectivity Kind = original.Connectivity
+)
+
+type ProvisioningState = original.ProvisioningState
+
+const (
+ Deleting ProvisioningState = original.Deleting
+ Failed ProvisioningState = original.Failed
+ Succeeded ProvisioningState = original.Succeeded
+ Updating ProvisioningState = original.Updating
+)
+
+type Type = original.Type
+
+const (
+ HubAndSpokeTopology Type = original.HubAndSpokeTopology
+ MeshTopology Type = original.MeshTopology
+)
+
+type BaseClient = original.BaseClient
+type ConnectivityCollection = original.ConnectivityCollection
+type ErrorResponse = original.ErrorResponse
+type Group = original.Group
+type GroupListResult = original.GroupListResult
+type GroupListResultIterator = original.GroupListResultIterator
+type GroupListResultPage = original.GroupListResultPage
+type GroupProperties = original.GroupProperties
+type GroupsClient = original.GroupsClient
+type GroupsCreateOrUpdateFuture = original.GroupsCreateOrUpdateFuture
+type GroupsDeleteFuture = original.GroupsDeleteFuture
+type HubAndSpokePeeringPolicyProperties = original.HubAndSpokePeeringPolicyProperties
+type ListResult = original.ListResult
+type ListResultIterator = original.ListResultIterator
+type ListResultPage = original.ListResultPage
+type ManagedNetwork = original.ManagedNetwork
+type ManagedNetworksClient = original.ManagedNetworksClient
+type ManagedNetworksDeleteFutureType = original.ManagedNetworksDeleteFutureType
+type ManagedNetworksUpdateFutureType = original.ManagedNetworksUpdateFutureType
+type MeshPeeringPolicyProperties = original.MeshPeeringPolicyProperties
+type Operation = original.Operation
+type OperationDisplay = original.OperationDisplay
+type OperationListResult = original.OperationListResult
+type OperationListResultIterator = original.OperationListResultIterator
+type OperationListResultPage = original.OperationListResultPage
+type OperationsClient = original.OperationsClient
+type PeeringPoliciesClient = original.PeeringPoliciesClient
+type PeeringPoliciesCreateOrUpdateFuture = original.PeeringPoliciesCreateOrUpdateFuture
+type PeeringPoliciesDeleteFuture = original.PeeringPoliciesDeleteFuture
+type PeeringPolicy = original.PeeringPolicy
+type PeeringPolicyListResult = original.PeeringPolicyListResult
+type PeeringPolicyListResultIterator = original.PeeringPolicyListResultIterator
+type PeeringPolicyListResultPage = original.PeeringPolicyListResultPage
+type PeeringPolicyProperties = original.PeeringPolicyProperties
+type Properties = original.Properties
+type ProxyResource = original.ProxyResource
+type Resource = original.Resource
+type ResourceID = original.ResourceID
+type ResourceProperties = original.ResourceProperties
+type Scope = original.Scope
+type ScopeAssignment = original.ScopeAssignment
+type ScopeAssignmentListResult = original.ScopeAssignmentListResult
+type ScopeAssignmentListResultIterator = original.ScopeAssignmentListResultIterator
+type ScopeAssignmentListResultPage = original.ScopeAssignmentListResultPage
+type ScopeAssignmentProperties = original.ScopeAssignmentProperties
+type ScopeAssignmentsClient = original.ScopeAssignmentsClient
+type TrackedResource = original.TrackedResource
+type Update = original.Update
+
+func New(subscriptionID string) BaseClient {
+ return original.New(subscriptionID)
+}
+func NewGroupListResultIterator(page GroupListResultPage) GroupListResultIterator {
+ return original.NewGroupListResultIterator(page)
+}
+func NewGroupListResultPage(getNextPage func(context.Context, GroupListResult) (GroupListResult, error)) GroupListResultPage {
+ return original.NewGroupListResultPage(getNextPage)
+}
+func NewGroupsClient(subscriptionID string) GroupsClient {
+ return original.NewGroupsClient(subscriptionID)
+}
+func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient {
+ return original.NewGroupsClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewListResultIterator(page ListResultPage) ListResultIterator {
+ return original.NewListResultIterator(page)
+}
+func NewListResultPage(getNextPage func(context.Context, ListResult) (ListResult, error)) ListResultPage {
+ return original.NewListResultPage(getNextPage)
+}
+func NewManagedNetworksClient(subscriptionID string) ManagedNetworksClient {
+ return original.NewManagedNetworksClient(subscriptionID)
+}
+func NewManagedNetworksClientWithBaseURI(baseURI string, subscriptionID string) ManagedNetworksClient {
+ return original.NewManagedNetworksClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
+ return original.NewOperationListResultIterator(page)
+}
+func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
+ return original.NewOperationListResultPage(getNextPage)
+}
+func NewOperationsClient(subscriptionID string) OperationsClient {
+ return original.NewOperationsClient(subscriptionID)
+}
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+ return original.NewOperationsClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewPeeringPoliciesClient(subscriptionID string) PeeringPoliciesClient {
+ return original.NewPeeringPoliciesClient(subscriptionID)
+}
+func NewPeeringPoliciesClientWithBaseURI(baseURI string, subscriptionID string) PeeringPoliciesClient {
+ return original.NewPeeringPoliciesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewPeeringPolicyListResultIterator(page PeeringPolicyListResultPage) PeeringPolicyListResultIterator {
+ return original.NewPeeringPolicyListResultIterator(page)
+}
+func NewPeeringPolicyListResultPage(getNextPage func(context.Context, PeeringPolicyListResult) (PeeringPolicyListResult, error)) PeeringPolicyListResultPage {
+ return original.NewPeeringPolicyListResultPage(getNextPage)
+}
+func NewScopeAssignmentListResultIterator(page ScopeAssignmentListResultPage) ScopeAssignmentListResultIterator {
+ return original.NewScopeAssignmentListResultIterator(page)
+}
+func NewScopeAssignmentListResultPage(getNextPage func(context.Context, ScopeAssignmentListResult) (ScopeAssignmentListResult, error)) ScopeAssignmentListResultPage {
+ return original.NewScopeAssignmentListResultPage(getNextPage)
+}
+func NewScopeAssignmentsClient(subscriptionID string) ScopeAssignmentsClient {
+ return original.NewScopeAssignmentsClient(subscriptionID)
+}
+func NewScopeAssignmentsClientWithBaseURI(baseURI string, subscriptionID string) ScopeAssignmentsClient {
+ return original.NewScopeAssignmentsClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return original.NewWithBaseURI(baseURI, subscriptionID)
+}
+func PossibleKindValues() []Kind {
+ return original.PossibleKindValues()
+}
+func PossibleProvisioningStateValues() []ProvisioningState {
+ return original.PossibleProvisioningStateValues()
+}
+func PossibleTypeValues() []Type {
+ return original.PossibleTypeValues()
+}
+func UserAgent() string {
+ return original.UserAgent() + " profiles/preview"
+}
+func Version() string {
+ return original.Version()
+}
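
A minimal sketch of the new managednetwork profile package above; the subscription ID is a placeholder, an authorizer would still need to be configured before calling operations, and only constructors and helpers declared in this file are used.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/preview/preview/managednetwork/mgmt/managednetwork"
)

func main() {
	// Client constructors exposed by the new profile.
	networks := managednetwork.NewManagedNetworksClient("<subscription id>")
	groups := managednetwork.NewGroupsClient("<subscription id>")
	_, _ = networks, groups

	// Topology and provisioning-state enums re-exported by the profile.
	fmt.Println(managednetwork.PossibleTypeValues())
	fmt.Println(managednetwork.PossibleProvisioningStateValues())
}
```
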
diff --git a/profiles/preview/preview/peering/mgmt/peering/models.go b/profiles/preview/preview/peering/mgmt/peering/models.go
index 944d06af4fc5..cabe32ebf48e 100644
--- a/profiles/preview/preview/peering/mgmt/peering/models.go
+++ b/profiles/preview/preview/peering/mgmt/peering/models.go
@@ -22,7 +22,7 @@ package peering
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-03-01-preview/peering"
+ original "github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
)
const (
@@ -42,6 +42,15 @@ const (
Validating ConnectionState = original.Validating
)
+type DirectPeeringType = original.DirectPeeringType
+
+const (
+ Cdn DirectPeeringType = original.Cdn
+ Edge DirectPeeringType = original.Edge
+ Internal DirectPeeringType = original.Internal
+ Transit DirectPeeringType = original.Transit
+)
+
type Family = original.Family
const (
@@ -56,15 +65,36 @@ const (
KindExchange Kind = original.KindExchange
)
+type LearnedType = original.LearnedType
+
+const (
+ LearnedTypeNone LearnedType = original.LearnedTypeNone
+ LearnedTypeViaPartner LearnedType = original.LearnedTypeViaPartner
+ LearnedTypeViaSession LearnedType = original.LearnedTypeViaSession
+)
+
type Name = original.Name
const (
BasicDirectFree Name = original.BasicDirectFree
BasicExchangeFree Name = original.BasicExchangeFree
PremiumDirectFree Name = original.PremiumDirectFree
+ PremiumDirectMetered Name = original.PremiumDirectMetered
+ PremiumDirectUnlimited Name = original.PremiumDirectUnlimited
PremiumExchangeMetered Name = original.PremiumExchangeMetered
)
+type PrefixValidationState = original.PrefixValidationState
+
+const (
+ PrefixValidationStateFailed PrefixValidationState = original.PrefixValidationStateFailed
+ PrefixValidationStateInvalid PrefixValidationState = original.PrefixValidationStateInvalid
+ PrefixValidationStateNone PrefixValidationState = original.PrefixValidationStateNone
+ PrefixValidationStatePending PrefixValidationState = original.PrefixValidationStatePending
+ PrefixValidationStateUnknown PrefixValidationState = original.PrefixValidationStateUnknown
+ PrefixValidationStateVerified PrefixValidationState = original.PrefixValidationStateVerified
+)
+
type ProvisioningState = original.ProvisioningState
const (
@@ -74,6 +104,13 @@ const (
Updating ProvisioningState = original.Updating
)
+type SessionAddressProvider = original.SessionAddressProvider
+
+const (
+ Microsoft SessionAddressProvider = original.Microsoft
+ Peer SessionAddressProvider = original.Peer
+)
+
type SessionStateV4 = original.SessionStateV4
const (
@@ -83,6 +120,7 @@ const (
SessionStateV4Idle SessionStateV4 = original.SessionStateV4Idle
SessionStateV4None SessionStateV4 = original.SessionStateV4None
SessionStateV4OpenConfirm SessionStateV4 = original.SessionStateV4OpenConfirm
+ SessionStateV4OpenReceived SessionStateV4 = original.SessionStateV4OpenReceived
SessionStateV4OpenSent SessionStateV4 = original.SessionStateV4OpenSent
SessionStateV4PendingAdd SessionStateV4 = original.SessionStateV4PendingAdd
SessionStateV4PendingRemove SessionStateV4 = original.SessionStateV4PendingRemove
@@ -98,6 +136,7 @@ const (
SessionStateV6Idle SessionStateV6 = original.SessionStateV6Idle
SessionStateV6None SessionStateV6 = original.SessionStateV6None
SessionStateV6OpenConfirm SessionStateV6 = original.SessionStateV6OpenConfirm
+ SessionStateV6OpenReceived SessionStateV6 = original.SessionStateV6OpenReceived
SessionStateV6OpenSent SessionStateV6 = original.SessionStateV6OpenSent
SessionStateV6PendingAdd SessionStateV6 = original.SessionStateV6PendingAdd
SessionStateV6PendingRemove SessionStateV6 = original.SessionStateV6PendingRemove
@@ -131,6 +170,7 @@ const (
type BandwidthOffer = original.BandwidthOffer
type BaseClient = original.BaseClient
type BgpSession = original.BgpSession
+type CheckServiceProviderAvailabilityInput = original.CheckServiceProviderAvailabilityInput
type ContactInfo = original.ContactInfo
type DirectConnection = original.DirectConnection
type DirectPeeringFacility = original.DirectPeeringFacility
@@ -163,12 +203,38 @@ type PeerAsnListResultPage = original.PeerAsnListResultPage
type PeerAsnProperties = original.PeerAsnProperties
type PeerAsnsClient = original.PeerAsnsClient
type PeeringsClient = original.PeeringsClient
+type PrefixesClient = original.PrefixesClient
type Properties = original.Properties
type PropertiesDirect = original.PropertiesDirect
type PropertiesExchange = original.PropertiesExchange
type Resource = original.Resource
type ResourceTags = original.ResourceTags
+type Service = original.Service
+type ServiceListResult = original.ServiceListResult
+type ServiceListResultIterator = original.ServiceListResultIterator
+type ServiceListResultPage = original.ServiceListResultPage
+type ServiceLocation = original.ServiceLocation
+type ServiceLocationListResult = original.ServiceLocationListResult
+type ServiceLocationListResultIterator = original.ServiceLocationListResultIterator
+type ServiceLocationListResultPage = original.ServiceLocationListResultPage
+type ServiceLocationProperties = original.ServiceLocationProperties
+type ServiceLocationsClient = original.ServiceLocationsClient
+type ServicePrefix = original.ServicePrefix
+type ServicePrefixListResult = original.ServicePrefixListResult
+type ServicePrefixListResultIterator = original.ServicePrefixListResultIterator
+type ServicePrefixListResultPage = original.ServicePrefixListResultPage
+type ServicePrefixProperties = original.ServicePrefixProperties
+type ServicePrefixesClient = original.ServicePrefixesClient
+type ServiceProperties = original.ServiceProperties
+type ServiceProvider = original.ServiceProvider
+type ServiceProviderListResult = original.ServiceProviderListResult
+type ServiceProviderListResultIterator = original.ServiceProviderListResultIterator
+type ServiceProviderListResultPage = original.ServiceProviderListResultPage
+type ServiceProviderProperties = original.ServiceProviderProperties
+type ServiceProvidersClient = original.ServiceProvidersClient
+type ServicesClient = original.ServicesClient
type Sku = original.Sku
+type String = original.String
type SubResource = original.SubResource
func New(subscriptionID string) BaseClient {
@@ -228,24 +294,90 @@ func NewPeeringsClient(subscriptionID string) PeeringsClient {
func NewPeeringsClientWithBaseURI(baseURI string, subscriptionID string) PeeringsClient {
return original.NewPeeringsClientWithBaseURI(baseURI, subscriptionID)
}
+func NewPrefixesClient(subscriptionID string) PrefixesClient {
+ return original.NewPrefixesClient(subscriptionID)
+}
+func NewPrefixesClientWithBaseURI(baseURI string, subscriptionID string) PrefixesClient {
+ return original.NewPrefixesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewServiceListResultIterator(page ServiceListResultPage) ServiceListResultIterator {
+ return original.NewServiceListResultIterator(page)
+}
+func NewServiceListResultPage(getNextPage func(context.Context, ServiceListResult) (ServiceListResult, error)) ServiceListResultPage {
+ return original.NewServiceListResultPage(getNextPage)
+}
+func NewServiceLocationListResultIterator(page ServiceLocationListResultPage) ServiceLocationListResultIterator {
+ return original.NewServiceLocationListResultIterator(page)
+}
+func NewServiceLocationListResultPage(getNextPage func(context.Context, ServiceLocationListResult) (ServiceLocationListResult, error)) ServiceLocationListResultPage {
+ return original.NewServiceLocationListResultPage(getNextPage)
+}
+func NewServiceLocationsClient(subscriptionID string) ServiceLocationsClient {
+ return original.NewServiceLocationsClient(subscriptionID)
+}
+func NewServiceLocationsClientWithBaseURI(baseURI string, subscriptionID string) ServiceLocationsClient {
+ return original.NewServiceLocationsClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewServicePrefixListResultIterator(page ServicePrefixListResultPage) ServicePrefixListResultIterator {
+ return original.NewServicePrefixListResultIterator(page)
+}
+func NewServicePrefixListResultPage(getNextPage func(context.Context, ServicePrefixListResult) (ServicePrefixListResult, error)) ServicePrefixListResultPage {
+ return original.NewServicePrefixListResultPage(getNextPage)
+}
+func NewServicePrefixesClient(subscriptionID string) ServicePrefixesClient {
+ return original.NewServicePrefixesClient(subscriptionID)
+}
+func NewServicePrefixesClientWithBaseURI(baseURI string, subscriptionID string) ServicePrefixesClient {
+ return original.NewServicePrefixesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewServiceProviderListResultIterator(page ServiceProviderListResultPage) ServiceProviderListResultIterator {
+ return original.NewServiceProviderListResultIterator(page)
+}
+func NewServiceProviderListResultPage(getNextPage func(context.Context, ServiceProviderListResult) (ServiceProviderListResult, error)) ServiceProviderListResultPage {
+ return original.NewServiceProviderListResultPage(getNextPage)
+}
+func NewServiceProvidersClient(subscriptionID string) ServiceProvidersClient {
+ return original.NewServiceProvidersClient(subscriptionID)
+}
+func NewServiceProvidersClientWithBaseURI(baseURI string, subscriptionID string) ServiceProvidersClient {
+ return original.NewServiceProvidersClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewServicesClient(subscriptionID string) ServicesClient {
+ return original.NewServicesClient(subscriptionID)
+}
+func NewServicesClientWithBaseURI(baseURI string, subscriptionID string) ServicesClient {
+ return original.NewServicesClientWithBaseURI(baseURI, subscriptionID)
+}
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
return original.NewWithBaseURI(baseURI, subscriptionID)
}
func PossibleConnectionStateValues() []ConnectionState {
return original.PossibleConnectionStateValues()
}
+func PossibleDirectPeeringTypeValues() []DirectPeeringType {
+ return original.PossibleDirectPeeringTypeValues()
+}
func PossibleFamilyValues() []Family {
return original.PossibleFamilyValues()
}
func PossibleKindValues() []Kind {
return original.PossibleKindValues()
}
+func PossibleLearnedTypeValues() []LearnedType {
+ return original.PossibleLearnedTypeValues()
+}
func PossibleNameValues() []Name {
return original.PossibleNameValues()
}
+func PossiblePrefixValidationStateValues() []PrefixValidationState {
+ return original.PossiblePrefixValidationStateValues()
+}
func PossibleProvisioningStateValues() []ProvisioningState {
return original.PossibleProvisioningStateValues()
}
+func PossibleSessionAddressProviderValues() []SessionAddressProvider {
+ return original.PossibleSessionAddressProviderValues()
+}
func PossibleSessionStateV4Values() []SessionStateV4 {
return original.PossibleSessionStateV4Values()
}
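
The hunk above adds profile forwarders for the new peering prefix and peering service clients. Below is a minimal sketch of constructing the new clients through the preview profile, assuming the profile import path implied by the surrounding hunks; the helper name is illustrative only.

```go
package example

import (
	peering "github.com/Azure/azure-sdk-for-go/profiles/preview/preview/peering/mgmt/peering"
)

// newPeeringClients builds the clients added in this release; each constructor
// takes only the subscription ID, matching the existing peering client constructors.
func newPeeringClients(subscriptionID string) (peering.PrefixesClient, peering.ServicesClient, peering.ServiceProvidersClient) {
	return peering.NewPrefixesClient(subscriptionID),
		peering.NewServicesClient(subscriptionID),
		peering.NewServiceProvidersClient(subscriptionID)
}
```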
diff --git a/profiles/preview/preview/peering/mgmt/peering/peeringapi/models.go b/profiles/preview/preview/peering/mgmt/peering/peeringapi/models.go
index 8a6e6b8888c5..d3fea5f8eafa 100644
--- a/profiles/preview/preview/peering/mgmt/peering/peeringapi/models.go
+++ b/profiles/preview/preview/peering/mgmt/peering/peeringapi/models.go
@@ -19,10 +19,16 @@
package peeringapi
-import original "github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-03-01-preview/peering/peeringapi"
+import original "github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering/peeringapi"
+type BaseClientAPI = original.BaseClientAPI
type LegacyPeeringsClientAPI = original.LegacyPeeringsClientAPI
type LocationsClientAPI = original.LocationsClientAPI
type OperationsClientAPI = original.OperationsClientAPI
type PeerAsnsClientAPI = original.PeerAsnsClientAPI
type PeeringsClientAPI = original.PeeringsClientAPI
+type PrefixesClientAPI = original.PrefixesClientAPI
+type ServiceLocationsClientAPI = original.ServiceLocationsClientAPI
+type ServicePrefixesClientAPI = original.ServicePrefixesClientAPI
+type ServiceProvidersClientAPI = original.ServiceProvidersClientAPI
+type ServicesClientAPI = original.ServicesClientAPI
diff --git a/profiles/preview/preview/reservations/mgmt/reservations/models.go b/profiles/preview/preview/reservations/mgmt/reservations/models.go
index d67c52105d97..56e317240017 100644
--- a/profiles/preview/preview/reservations/mgmt/reservations/models.go
+++ b/profiles/preview/preview/reservations/mgmt/reservations/models.go
@@ -175,15 +175,18 @@ type Patch = original.Patch
type PatchProperties = original.PatchProperties
type PatchPropertiesRenewProperties = original.PatchPropertiesRenewProperties
type Properties = original.Properties
+type PropertiesType = original.PropertiesType
type PurchaseRequest = original.PurchaseRequest
type PurchaseRequestProperties = original.PurchaseRequestProperties
type PurchaseRequestPropertiesReservedResourceProperties = original.PurchaseRequestPropertiesReservedResourceProperties
type RenewPropertiesResponse = original.RenewPropertiesResponse
type RenewPropertiesResponseBillingCurrencyTotal = original.RenewPropertiesResponseBillingCurrencyTotal
type RenewPropertiesResponsePricingCurrencyTotal = original.RenewPropertiesResponsePricingCurrencyTotal
+type ReservationAvailableScopesFuture = original.ReservationAvailableScopesFuture
type ReservationMergeFuture = original.ReservationMergeFuture
type ReservationUpdateFuture = original.ReservationUpdateFuture
type Response = original.Response
+type ScopeProperties = original.ScopeProperties
type SkuName = original.SkuName
type SkuProperty = original.SkuProperty
type SkuRestriction = original.SkuRestriction
@@ -191,6 +194,7 @@ type SplitFuture = original.SplitFuture
type SplitProperties = original.SplitProperties
type SplitPropertiesType = original.SplitPropertiesType
type SplitRequest = original.SplitRequest
+type SubscriptionScopeProperties = original.SubscriptionScopeProperties
func New() BaseClient {
return original.New()
diff --git a/profiles/preview/preview/securityinsight/mgmt/securityinsight/models.go b/profiles/preview/preview/securityinsight/mgmt/securityinsight/models.go
index c922c6c0f486..b3dbd8e93239 100644
--- a/profiles/preview/preview/securityinsight/mgmt/securityinsight/models.go
+++ b/profiles/preview/preview/securityinsight/mgmt/securityinsight/models.go
@@ -38,6 +38,8 @@ const (
type AlertRuleKind = original.AlertRuleKind
const (
+ Filter AlertRuleKind = original.Filter
+ Fusion AlertRuleKind = original.Fusion
Scheduled AlertRuleKind = original.Scheduled
)
@@ -50,6 +52,32 @@ const (
Medium AlertSeverity = original.Medium
)
+type AlertStatus = original.AlertStatus
+
+const (
+ AlertStatusDismissed AlertStatus = original.AlertStatusDismissed
+ AlertStatusInProgress AlertStatus = original.AlertStatusInProgress
+ AlertStatusNew AlertStatus = original.AlertStatusNew
+ AlertStatusResolved AlertStatus = original.AlertStatusResolved
+ AlertStatusUnknown AlertStatus = original.AlertStatusUnknown
+)
+
+type AttackTactic = original.AttackTactic
+
+const (
+ Collection AttackTactic = original.Collection
+ CommandAndControl AttackTactic = original.CommandAndControl
+ CredentialAccess AttackTactic = original.CredentialAccess
+ DefenseEvasion AttackTactic = original.DefenseEvasion
+ Discovery AttackTactic = original.Discovery
+ Execution AttackTactic = original.Execution
+ Exfiltration AttackTactic = original.Exfiltration
+ InitialAccess AttackTactic = original.InitialAccess
+ LateralMovement AttackTactic = original.LateralMovement
+ Persistence AttackTactic = original.Persistence
+ PrivilegeEscalation AttackTactic = original.PrivilegeEscalation
+)
+
type CaseSeverity = original.CaseSeverity
const (
@@ -72,22 +100,41 @@ const (
type CloseReason = original.CloseReason
const (
- Dismissed CloseReason = original.Dismissed
- Other CloseReason = original.Other
- Resolved CloseReason = original.Resolved
+ Dismissed CloseReason = original.Dismissed
+ FalsePositive CloseReason = original.FalsePositive
+ Other CloseReason = original.Other
+ Resolved CloseReason = original.Resolved
+ TruePositive CloseReason = original.TruePositive
+)
+
+type ConfidenceLevel = original.ConfidenceLevel
+
+const (
+ ConfidenceLevelHigh ConfidenceLevel = original.ConfidenceLevelHigh
+ ConfidenceLevelLow ConfidenceLevel = original.ConfidenceLevelLow
+ ConfidenceLevelUnknown ConfidenceLevel = original.ConfidenceLevelUnknown
+)
+
+type ConfidenceScoreStatus = original.ConfidenceScoreStatus
+
+const (
+ Final ConfidenceScoreStatus = original.Final
+ InProcess ConfidenceScoreStatus = original.InProcess
+ NotApplicable ConfidenceScoreStatus = original.NotApplicable
+ NotFinal ConfidenceScoreStatus = original.NotFinal
)
type DataConnectorKind = original.DataConnectorKind
const (
- AmazonWebServicesCloudTrail DataConnectorKind = original.AmazonWebServicesCloudTrail
- AzureActiveDirectory DataConnectorKind = original.AzureActiveDirectory
- AzureAdvancedThreatProtection DataConnectorKind = original.AzureAdvancedThreatProtection
- AzureSecurityCenter DataConnectorKind = original.AzureSecurityCenter
- MicrosoftCloudAppSecurity DataConnectorKind = original.MicrosoftCloudAppSecurity
- MicrosoftDefenderAdvancedThreatProtection DataConnectorKind = original.MicrosoftDefenderAdvancedThreatProtection
- Office365 DataConnectorKind = original.Office365
- ThreatIntelligence DataConnectorKind = original.ThreatIntelligence
+ DataConnectorKindAmazonWebServicesCloudTrail DataConnectorKind = original.DataConnectorKindAmazonWebServicesCloudTrail
+ DataConnectorKindAzureActiveDirectory DataConnectorKind = original.DataConnectorKindAzureActiveDirectory
+ DataConnectorKindAzureAdvancedThreatProtection DataConnectorKind = original.DataConnectorKindAzureAdvancedThreatProtection
+ DataConnectorKindAzureSecurityCenter DataConnectorKind = original.DataConnectorKindAzureSecurityCenter
+ DataConnectorKindMicrosoftCloudAppSecurity DataConnectorKind = original.DataConnectorKindMicrosoftCloudAppSecurity
+ DataConnectorKindMicrosoftDefenderAdvancedThreatProtection DataConnectorKind = original.DataConnectorKindMicrosoftDefenderAdvancedThreatProtection
+ DataConnectorKindOffice365 DataConnectorKind = original.DataConnectorKindOffice365
+ DataConnectorKindThreatIntelligence DataConnectorKind = original.DataConnectorKindThreatIntelligence
)
type DataTypeState = original.DataTypeState
@@ -97,12 +144,90 @@ const (
Enabled DataTypeState = original.Enabled
)
+type DataTypeStatus = original.DataTypeStatus
+
+const (
+ Exist DataTypeStatus = original.Exist
+ NotExist DataTypeStatus = original.NotExist
+)
+
+type ElevationToken = original.ElevationToken
+
+const (
+ Default ElevationToken = original.Default
+ Full ElevationToken = original.Full
+ Limited ElevationToken = original.Limited
+)
+
type EntityKind = original.EntityKind
const (
- Account EntityKind = original.Account
- File EntityKind = original.File
- Host EntityKind = original.Host
+ EntityKindAccount EntityKind = original.EntityKindAccount
+ EntityKindAzureResource EntityKind = original.EntityKindAzureResource
+ EntityKindBookmark EntityKind = original.EntityKindBookmark
+ EntityKindCloudApplication EntityKind = original.EntityKindCloudApplication
+ EntityKindDNSResolution EntityKind = original.EntityKindDNSResolution
+ EntityKindFile EntityKind = original.EntityKindFile
+ EntityKindFileHash EntityKind = original.EntityKindFileHash
+ EntityKindHost EntityKind = original.EntityKindHost
+ EntityKindIP EntityKind = original.EntityKindIP
+ EntityKindMalware EntityKind = original.EntityKindMalware
+ EntityKindProcess EntityKind = original.EntityKindProcess
+ EntityKindRegistryKey EntityKind = original.EntityKindRegistryKey
+ EntityKindRegistryValue EntityKind = original.EntityKindRegistryValue
+ EntityKindSecurityAlert EntityKind = original.EntityKindSecurityAlert
+ EntityKindSecurityGroup EntityKind = original.EntityKindSecurityGroup
+ EntityKindURL EntityKind = original.EntityKindURL
+)
+
+type EntityType = original.EntityType
+
+const (
+ EntityTypeAccount EntityType = original.EntityTypeAccount
+ EntityTypeAzureResource EntityType = original.EntityTypeAzureResource
+ EntityTypeCloudApplication EntityType = original.EntityTypeCloudApplication
+ EntityTypeDNS EntityType = original.EntityTypeDNS
+ EntityTypeFile EntityType = original.EntityTypeFile
+ EntityTypeFileHash EntityType = original.EntityTypeFileHash
+ EntityTypeHost EntityType = original.EntityTypeHost
+ EntityTypeHuntingBookmark EntityType = original.EntityTypeHuntingBookmark
+ EntityTypeIP EntityType = original.EntityTypeIP
+ EntityTypeMalware EntityType = original.EntityTypeMalware
+ EntityTypeProcess EntityType = original.EntityTypeProcess
+ EntityTypeRegistryKey EntityType = original.EntityTypeRegistryKey
+ EntityTypeRegistryValue EntityType = original.EntityTypeRegistryValue
+ EntityTypeSecurityAlert EntityType = original.EntityTypeSecurityAlert
+ EntityTypeSecurityGroup EntityType = original.EntityTypeSecurityGroup
+ EntityTypeURL EntityType = original.EntityTypeURL
+)
+
+type FileHashAlgorithm = original.FileHashAlgorithm
+
+const (
+ MD5 FileHashAlgorithm = original.MD5
+ SHA1 FileHashAlgorithm = original.SHA1
+ SHA256 FileHashAlgorithm = original.SHA256
+ SHA256AC FileHashAlgorithm = original.SHA256AC
+ Unknown FileHashAlgorithm = original.Unknown
+)
+
+type KillChainIntent = original.KillChainIntent
+
+const (
+ KillChainIntentCollection KillChainIntent = original.KillChainIntentCollection
+ KillChainIntentCommandAndControl KillChainIntent = original.KillChainIntentCommandAndControl
+ KillChainIntentCredentialAccess KillChainIntent = original.KillChainIntentCredentialAccess
+ KillChainIntentDefenseEvasion KillChainIntent = original.KillChainIntentDefenseEvasion
+ KillChainIntentDiscovery KillChainIntent = original.KillChainIntentDiscovery
+ KillChainIntentExecution KillChainIntent = original.KillChainIntentExecution
+ KillChainIntentExfiltration KillChainIntent = original.KillChainIntentExfiltration
+ KillChainIntentExploitation KillChainIntent = original.KillChainIntentExploitation
+ KillChainIntentImpact KillChainIntent = original.KillChainIntentImpact
+ KillChainIntentLateralMovement KillChainIntent = original.KillChainIntentLateralMovement
+ KillChainIntentPersistence KillChainIntent = original.KillChainIntentPersistence
+ KillChainIntentPrivilegeEscalation KillChainIntent = original.KillChainIntentPrivilegeEscalation
+ KillChainIntentProbing KillChainIntent = original.KillChainIntentProbing
+ KillChainIntentUnknown KillChainIntent = original.KillChainIntentUnknown
)
type Kind = original.Kind
@@ -119,6 +244,15 @@ const (
KindCasesAggregation KindBasicAggregations = original.KindCasesAggregation
)
+type KindBasicAlertRuleTemplate = original.KindBasicAlertRuleTemplate
+
+const (
+ KindBasicAlertRuleTemplateKindAlertRuleTemplate KindBasicAlertRuleTemplate = original.KindBasicAlertRuleTemplateKindAlertRuleTemplate
+ KindBasicAlertRuleTemplateKindFilter KindBasicAlertRuleTemplate = original.KindBasicAlertRuleTemplateKindFilter
+ KindBasicAlertRuleTemplateKindFusion KindBasicAlertRuleTemplate = original.KindBasicAlertRuleTemplateKindFusion
+ KindBasicAlertRuleTemplateKindScheduled KindBasicAlertRuleTemplate = original.KindBasicAlertRuleTemplateKindScheduled
+)
+
type KindBasicDataConnector = original.KindBasicDataConnector
const (
@@ -136,10 +270,22 @@ const (
type KindBasicEntity = original.KindBasicEntity
const (
- KindAccount KindBasicEntity = original.KindAccount
- KindEntity KindBasicEntity = original.KindEntity
- KindFile KindBasicEntity = original.KindFile
- KindHost KindBasicEntity = original.KindHost
+ KindAccount KindBasicEntity = original.KindAccount
+ KindAzureResource KindBasicEntity = original.KindAzureResource
+ KindCloudApplication KindBasicEntity = original.KindCloudApplication
+ KindDNSResolution KindBasicEntity = original.KindDNSResolution
+ KindEntity KindBasicEntity = original.KindEntity
+ KindFile KindBasicEntity = original.KindFile
+ KindFileHash KindBasicEntity = original.KindFileHash
+ KindHost KindBasicEntity = original.KindHost
+ KindIP KindBasicEntity = original.KindIP
+ KindMalware KindBasicEntity = original.KindMalware
+ KindProcess KindBasicEntity = original.KindProcess
+ KindRegistryKey KindBasicEntity = original.KindRegistryKey
+ KindRegistryValue KindBasicEntity = original.KindRegistryValue
+ KindSecurityAlert KindBasicEntity = original.KindSecurityAlert
+ KindSecurityGroup KindBasicEntity = original.KindSecurityGroup
+ KindURL KindBasicEntity = original.KindURL
)
type KindBasicSettings = original.KindBasicSettings
@@ -166,6 +312,34 @@ const (
Windows OSFamily = original.Windows
)
+type RegistryHive = original.RegistryHive
+
+const (
+ HKEYA RegistryHive = original.HKEYA
+ HKEYCLASSESROOT RegistryHive = original.HKEYCLASSESROOT
+ HKEYCURRENTCONFIG RegistryHive = original.HKEYCURRENTCONFIG
+ HKEYCURRENTUSER RegistryHive = original.HKEYCURRENTUSER
+ HKEYCURRENTUSERLOCALSETTINGS RegistryHive = original.HKEYCURRENTUSERLOCALSETTINGS
+ HKEYLOCALMACHINE RegistryHive = original.HKEYLOCALMACHINE
+ HKEYPERFORMANCEDATA RegistryHive = original.HKEYPERFORMANCEDATA
+ HKEYPERFORMANCENLSTEXT RegistryHive = original.HKEYPERFORMANCENLSTEXT
+ HKEYPERFORMANCETEXT RegistryHive = original.HKEYPERFORMANCETEXT
+ HKEYUSERS RegistryHive = original.HKEYUSERS
+)
+
+type RegistryValueKind = original.RegistryValueKind
+
+const (
+ RegistryValueKindBinary RegistryValueKind = original.RegistryValueKindBinary
+ RegistryValueKindDWord RegistryValueKind = original.RegistryValueKindDWord
+ RegistryValueKindExpandString RegistryValueKind = original.RegistryValueKindExpandString
+ RegistryValueKindMultiString RegistryValueKind = original.RegistryValueKindMultiString
+ RegistryValueKindNone RegistryValueKind = original.RegistryValueKindNone
+ RegistryValueKindQWord RegistryValueKind = original.RegistryValueKindQWord
+ RegistryValueKindString RegistryValueKind = original.RegistryValueKindString
+ RegistryValueKindUnknown RegistryValueKind = original.RegistryValueKindUnknown
+)
+
type SettingKind = original.SettingKind
const (
@@ -180,6 +354,14 @@ const (
StatusInMcasEnabled StatusInMcas = original.StatusInMcasEnabled
)
+type TemplateStatus = original.TemplateStatus
+
+const (
+ Available TemplateStatus = original.Available
+ Installed TemplateStatus = original.Installed
+ NotAvailable TemplateStatus = original.NotAvailable
+)
+
type TriggerOperator = original.TriggerOperator
const (
@@ -209,6 +391,12 @@ type AggregationsModel = original.AggregationsModel
type AlertRule = original.AlertRule
type AlertRuleKind1 = original.AlertRuleKind1
type AlertRuleModel = original.AlertRuleModel
+type AlertRuleTemplate = original.AlertRuleTemplate
+type AlertRuleTemplateModel = original.AlertRuleTemplateModel
+type AlertRuleTemplatesClient = original.AlertRuleTemplatesClient
+type AlertRuleTemplatesList = original.AlertRuleTemplatesList
+type AlertRuleTemplatesListIterator = original.AlertRuleTemplatesListIterator
+type AlertRuleTemplatesListPage = original.AlertRuleTemplatesListPage
type AlertRulesClient = original.AlertRulesClient
type AlertRulesList = original.AlertRulesList
type AlertRulesListIterator = original.AlertRulesListIterator
@@ -219,9 +407,13 @@ type AwsCloudTrailDataConnector = original.AwsCloudTrailDataConnector
type AwsCloudTrailDataConnectorDataTypes = original.AwsCloudTrailDataConnectorDataTypes
type AwsCloudTrailDataConnectorDataTypesLogs = original.AwsCloudTrailDataConnectorDataTypesLogs
type AwsCloudTrailDataConnectorProperties = original.AwsCloudTrailDataConnectorProperties
+type AzureResourceEntity = original.AzureResourceEntity
+type AzureResourceEntityProperties = original.AzureResourceEntityProperties
+type BaseAlertRuleTemplateProperties = original.BaseAlertRuleTemplateProperties
type BaseClient = original.BaseClient
type BasicAggregations = original.BasicAggregations
type BasicAlertRule = original.BasicAlertRule
+type BasicAlertRuleTemplate = original.BasicAlertRuleTemplate
type BasicDataConnector = original.BasicDataConnector
type BasicEntity = original.BasicEntity
type BasicSettings = original.BasicSettings
@@ -232,6 +424,12 @@ type BookmarkListPage = original.BookmarkListPage
type BookmarkProperties = original.BookmarkProperties
type BookmarksClient = original.BookmarksClient
type Case = original.Case
+type CaseComment = original.CaseComment
+type CaseCommentList = original.CaseCommentList
+type CaseCommentListIterator = original.CaseCommentListIterator
+type CaseCommentListPage = original.CaseCommentListPage
+type CaseCommentProperties = original.CaseCommentProperties
+type CaseCommentsClient = original.CaseCommentsClient
type CaseList = original.CaseList
type CaseListIterator = original.CaseListIterator
type CaseListPage = original.CaseListPage
@@ -242,8 +440,13 @@ type CasesAggregationByStatusProperties = original.CasesAggregationByStatusPrope
type CasesAggregationProperties = original.CasesAggregationProperties
type CasesAggregationsClient = original.CasesAggregationsClient
type CasesClient = original.CasesClient
+type CloudApplicationEntity = original.CloudApplicationEntity
+type CloudApplicationEntityProperties = original.CloudApplicationEntityProperties
type CloudError = original.CloudError
type CloudErrorBody = original.CloudErrorBody
+type CommentsClient = original.CommentsClient
+type DNSEntity = original.DNSEntity
+type DNSEntityProperties = original.DNSEntityProperties
type DataConnector = original.DataConnector
type DataConnectorDataTypeCommon = original.DataConnectorDataTypeCommon
type DataConnectorKind1 = original.DataConnectorKind1
@@ -251,11 +454,16 @@ type DataConnectorList = original.DataConnectorList
type DataConnectorListIterator = original.DataConnectorListIterator
type DataConnectorListPage = original.DataConnectorListPage
type DataConnectorModel = original.DataConnectorModel
+type DataConnectorStatus = original.DataConnectorStatus
type DataConnectorTenantID = original.DataConnectorTenantID
type DataConnectorWithAlertsProperties = original.DataConnectorWithAlertsProperties
type DataConnectorsClient = original.DataConnectorsClient
type EntitiesClient = original.EntitiesClient
type Entity = original.Entity
+type EntityCommonProperties = original.EntityCommonProperties
+type EntityExpandParameters = original.EntityExpandParameters
+type EntityExpandResponse = original.EntityExpandResponse
+type EntityExpandResponseValue = original.EntityExpandResponseValue
type EntityKind1 = original.EntityKind1
type EntityList = original.EntityList
type EntityListIterator = original.EntityListIterator
@@ -267,16 +475,31 @@ type EntityQueryList = original.EntityQueryList
type EntityQueryListIterator = original.EntityQueryListIterator
type EntityQueryListPage = original.EntityQueryListPage
type EntityQueryProperties = original.EntityQueryProperties
+type ExpansionResultAggregation = original.ExpansionResultAggregation
+type ExpansionResultsMetadata = original.ExpansionResultsMetadata
type FileEntity = original.FileEntity
type FileEntityProperties = original.FileEntityProperties
+type FileHashEntity = original.FileHashEntity
+type FileHashEntityProperties = original.FileHashEntityProperties
+type FilterAlertRuleTemplate = original.FilterAlertRuleTemplate
+type FilterAlertRuleTemplateProperties = original.FilterAlertRuleTemplateProperties
+type FilterAlertRuleTemplatePropertiesModel = original.FilterAlertRuleTemplatePropertiesModel
+type FusionAlertRuleTemplate = original.FusionAlertRuleTemplate
+type FusionAlertRuleTemplateProperties = original.FusionAlertRuleTemplateProperties
+type FusionAlertRuleTemplatePropertiesModel = original.FusionAlertRuleTemplatePropertiesModel
+type GeoLocation = original.GeoLocation
type HostEntity = original.HostEntity
type HostEntityProperties = original.HostEntityProperties
+type IPEntity = original.IPEntity
+type IPEntityProperties = original.IPEntityProperties
type MCASDataConnector = original.MCASDataConnector
type MCASDataConnectorDataTypes = original.MCASDataConnectorDataTypes
type MCASDataConnectorDataTypesDiscoveryLogs = original.MCASDataConnectorDataTypesDiscoveryLogs
type MCASDataConnectorProperties = original.MCASDataConnectorProperties
type MDATPDataConnector = original.MDATPDataConnector
type MDATPDataConnectorProperties = original.MDATPDataConnectorProperties
+type MalwareEntity = original.MalwareEntity
+type MalwareEntityProperties = original.MalwareEntityProperties
type OfficeConsent = original.OfficeConsent
type OfficeConsentList = original.OfficeConsentList
type OfficeConsentListIterator = original.OfficeConsentListIterator
@@ -294,10 +517,24 @@ type OperationsClient = original.OperationsClient
type OperationsList = original.OperationsList
type OperationsListIterator = original.OperationsListIterator
type OperationsListPage = original.OperationsListPage
+type ProcessEntity = original.ProcessEntity
+type ProcessEntityProperties = original.ProcessEntityProperties
type ProductSettingsClient = original.ProductSettingsClient
+type RegistryKeyEntity = original.RegistryKeyEntity
+type RegistryKeyEntityProperties = original.RegistryKeyEntityProperties
+type RegistryValueEntity = original.RegistryValueEntity
+type RegistryValueEntityProperties = original.RegistryValueEntityProperties
type Resource = original.Resource
type ScheduledAlertRule = original.ScheduledAlertRule
type ScheduledAlertRuleProperties = original.ScheduledAlertRuleProperties
+type ScheduledAlertRuleTemplate = original.ScheduledAlertRuleTemplate
+type ScheduledAlertRuleTemplateProperties = original.ScheduledAlertRuleTemplateProperties
+type ScheduledAlertRuleTemplatePropertiesModel = original.ScheduledAlertRuleTemplatePropertiesModel
+type SecurityAlert = original.SecurityAlert
+type SecurityAlertProperties = original.SecurityAlertProperties
+type SecurityAlertPropertiesConfidenceReasonsItem = original.SecurityAlertPropertiesConfidenceReasonsItem
+type SecurityGroupEntity = original.SecurityGroupEntity
+type SecurityGroupEntityProperties = original.SecurityGroupEntityProperties
type Settings = original.Settings
type SettingsKind = original.SettingsKind
type SettingsModel = original.SettingsModel
@@ -305,8 +542,11 @@ type TIDataConnector = original.TIDataConnector
type TIDataConnectorDataTypes = original.TIDataConnectorDataTypes
type TIDataConnectorDataTypesIndicators = original.TIDataConnectorDataTypesIndicators
type TIDataConnectorProperties = original.TIDataConnectorProperties
+type ThreatIntelligence = original.ThreatIntelligence
type ToggleSettings = original.ToggleSettings
type ToggleSettingsProperties = original.ToggleSettingsProperties
+type URLEntity = original.URLEntity
+type URLEntityProperties = original.URLEntityProperties
type UebaSettings = original.UebaSettings
type UebaSettingsProperties = original.UebaSettingsProperties
type UserInfo = original.UserInfo
@@ -326,6 +566,18 @@ func NewActionsListIterator(page ActionsListPage) ActionsListIterator {
func NewActionsListPage(getNextPage func(context.Context, ActionsList) (ActionsList, error)) ActionsListPage {
return original.NewActionsListPage(getNextPage)
}
+func NewAlertRuleTemplatesClient(subscriptionID string) AlertRuleTemplatesClient {
+ return original.NewAlertRuleTemplatesClient(subscriptionID)
+}
+func NewAlertRuleTemplatesClientWithBaseURI(baseURI string, subscriptionID string) AlertRuleTemplatesClient {
+ return original.NewAlertRuleTemplatesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewAlertRuleTemplatesListIterator(page AlertRuleTemplatesListPage) AlertRuleTemplatesListIterator {
+ return original.NewAlertRuleTemplatesListIterator(page)
+}
+func NewAlertRuleTemplatesListPage(getNextPage func(context.Context, AlertRuleTemplatesList) (AlertRuleTemplatesList, error)) AlertRuleTemplatesListPage {
+ return original.NewAlertRuleTemplatesListPage(getNextPage)
+}
func NewAlertRulesClient(subscriptionID string) AlertRulesClient {
return original.NewAlertRulesClient(subscriptionID)
}
@@ -350,6 +602,18 @@ func NewBookmarksClient(subscriptionID string) BookmarksClient {
func NewBookmarksClientWithBaseURI(baseURI string, subscriptionID string) BookmarksClient {
return original.NewBookmarksClientWithBaseURI(baseURI, subscriptionID)
}
+func NewCaseCommentListIterator(page CaseCommentListPage) CaseCommentListIterator {
+ return original.NewCaseCommentListIterator(page)
+}
+func NewCaseCommentListPage(getNextPage func(context.Context, CaseCommentList) (CaseCommentList, error)) CaseCommentListPage {
+ return original.NewCaseCommentListPage(getNextPage)
+}
+func NewCaseCommentsClient(subscriptionID string) CaseCommentsClient {
+ return original.NewCaseCommentsClient(subscriptionID)
+}
+func NewCaseCommentsClientWithBaseURI(baseURI string, subscriptionID string) CaseCommentsClient {
+ return original.NewCaseCommentsClientWithBaseURI(baseURI, subscriptionID)
+}
func NewCaseListIterator(page CaseListPage) CaseListIterator {
return original.NewCaseListIterator(page)
}
@@ -368,6 +632,12 @@ func NewCasesClient(subscriptionID string) CasesClient {
func NewCasesClientWithBaseURI(baseURI string, subscriptionID string) CasesClient {
return original.NewCasesClientWithBaseURI(baseURI, subscriptionID)
}
+func NewCommentsClient(subscriptionID string) CommentsClient {
+ return original.NewCommentsClient(subscriptionID)
+}
+func NewCommentsClientWithBaseURI(baseURI string, subscriptionID string) CommentsClient {
+ return original.NewCommentsClientWithBaseURI(baseURI, subscriptionID)
+}
func NewDataConnectorListIterator(page DataConnectorListPage) DataConnectorListIterator {
return original.NewDataConnectorListIterator(page)
}
@@ -446,6 +716,12 @@ func PossibleAlertRuleKindValues() []AlertRuleKind {
func PossibleAlertSeverityValues() []AlertSeverity {
return original.PossibleAlertSeverityValues()
}
+func PossibleAlertStatusValues() []AlertStatus {
+ return original.PossibleAlertStatusValues()
+}
+func PossibleAttackTacticValues() []AttackTactic {
+ return original.PossibleAttackTacticValues()
+}
func PossibleCaseSeverityValues() []CaseSeverity {
return original.PossibleCaseSeverityValues()
}
@@ -455,18 +731,42 @@ func PossibleCaseStatusValues() []CaseStatus {
func PossibleCloseReasonValues() []CloseReason {
return original.PossibleCloseReasonValues()
}
+func PossibleConfidenceLevelValues() []ConfidenceLevel {
+ return original.PossibleConfidenceLevelValues()
+}
+func PossibleConfidenceScoreStatusValues() []ConfidenceScoreStatus {
+ return original.PossibleConfidenceScoreStatusValues()
+}
func PossibleDataConnectorKindValues() []DataConnectorKind {
return original.PossibleDataConnectorKindValues()
}
func PossibleDataTypeStateValues() []DataTypeState {
return original.PossibleDataTypeStateValues()
}
+func PossibleDataTypeStatusValues() []DataTypeStatus {
+ return original.PossibleDataTypeStatusValues()
+}
+func PossibleElevationTokenValues() []ElevationToken {
+ return original.PossibleElevationTokenValues()
+}
func PossibleEntityKindValues() []EntityKind {
return original.PossibleEntityKindValues()
}
+func PossibleEntityTypeValues() []EntityType {
+ return original.PossibleEntityTypeValues()
+}
+func PossibleFileHashAlgorithmValues() []FileHashAlgorithm {
+ return original.PossibleFileHashAlgorithmValues()
+}
+func PossibleKillChainIntentValues() []KillChainIntent {
+ return original.PossibleKillChainIntentValues()
+}
func PossibleKindBasicAggregationsValues() []KindBasicAggregations {
return original.PossibleKindBasicAggregationsValues()
}
+func PossibleKindBasicAlertRuleTemplateValues() []KindBasicAlertRuleTemplate {
+ return original.PossibleKindBasicAlertRuleTemplateValues()
+}
func PossibleKindBasicDataConnectorValues() []KindBasicDataConnector {
return original.PossibleKindBasicDataConnectorValues()
}
@@ -485,12 +785,21 @@ func PossibleLicenseStatusValues() []LicenseStatus {
func PossibleOSFamilyValues() []OSFamily {
return original.PossibleOSFamilyValues()
}
+func PossibleRegistryHiveValues() []RegistryHive {
+ return original.PossibleRegistryHiveValues()
+}
+func PossibleRegistryValueKindValues() []RegistryValueKind {
+ return original.PossibleRegistryValueKindValues()
+}
func PossibleSettingKindValues() []SettingKind {
return original.PossibleSettingKindValues()
}
func PossibleStatusInMcasValues() []StatusInMcas {
return original.PossibleStatusInMcasValues()
}
+func PossibleTemplateStatusValues() []TemplateStatus {
+ return original.PossibleTemplateStatusValues()
+}
func PossibleTriggerOperatorValues() []TriggerOperator {
return original.PossibleTriggerOperatorValues()
}
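
Several securityinsight enum values are renamed with type prefixes in the hunk above (for example `AzureActiveDirectory` becomes `DataConnectorKindAzureActiveDirectory`, and `Account` becomes `EntityKindAccount`). A hedged sketch of caller code updated for the renamed `DataConnectorKind` constants; the function is illustrative only.

```go
package example

import (
	securityinsight "github.com/Azure/azure-sdk-for-go/profiles/preview/preview/securityinsight/mgmt/securityinsight"
)

// connectorLabel switches on the renamed DataConnectorKind constants, which
// carry the DataConnectorKind prefix after this breaking change.
func connectorLabel(kind securityinsight.DataConnectorKind) string {
	switch kind {
	case securityinsight.DataConnectorKindAzureActiveDirectory:
		return "Azure Active Directory"
	case securityinsight.DataConnectorKindOffice365:
		return "Office 365"
	default:
		return "other connector"
	}
}
```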
diff --git a/profiles/preview/preview/securityinsight/mgmt/securityinsight/securityinsightapi/models.go b/profiles/preview/preview/securityinsight/mgmt/securityinsight/securityinsightapi/models.go
index 9bd2fc7dc959..8ab6cea80eac 100644
--- a/profiles/preview/preview/securityinsight/mgmt/securityinsight/securityinsightapi/models.go
+++ b/profiles/preview/preview/securityinsight/mgmt/securityinsight/securityinsightapi/models.go
@@ -22,10 +22,13 @@ package securityinsightapi
import original "github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/securityinsightapi"
type ActionsClientAPI = original.ActionsClientAPI
+type AlertRuleTemplatesClientAPI = original.AlertRuleTemplatesClientAPI
type AlertRulesClientAPI = original.AlertRulesClientAPI
type BookmarksClientAPI = original.BookmarksClientAPI
+type CaseCommentsClientAPI = original.CaseCommentsClientAPI
type CasesAggregationsClientAPI = original.CasesAggregationsClientAPI
type CasesClientAPI = original.CasesClientAPI
+type CommentsClientAPI = original.CommentsClientAPI
type DataConnectorsClientAPI = original.DataConnectorsClientAPI
type EntitiesClientAPI = original.EntitiesClientAPI
type EntityQueriesClientAPI = original.EntityQueriesClientAPI
diff --git a/profiles/preview/preview/sql/mgmt/sql/models.go b/profiles/preview/preview/sql/mgmt/sql/models.go
index 9d61b25ad2d7..e6d4fe6eee40 100644
--- a/profiles/preview/preview/sql/mgmt/sql/models.go
+++ b/profiles/preview/preview/sql/mgmt/sql/models.go
@@ -309,6 +309,7 @@ type ManagedInstanceEncryptionProtectorListResultPage = original.ManagedInstance
type ManagedInstanceEncryptionProtectorProperties = original.ManagedInstanceEncryptionProtectorProperties
type ManagedInstanceEncryptionProtectorsClient = original.ManagedInstanceEncryptionProtectorsClient
type ManagedInstanceEncryptionProtectorsCreateOrUpdateFuture = original.ManagedInstanceEncryptionProtectorsCreateOrUpdateFuture
+type ManagedInstanceEncryptionProtectorsRevalidateFuture = original.ManagedInstanceEncryptionProtectorsRevalidateFuture
type ManagedInstanceFamilyCapability = original.ManagedInstanceFamilyCapability
type ManagedInstanceKey = original.ManagedInstanceKey
type ManagedInstanceKeyListResult = original.ManagedInstanceKeyListResult
diff --git a/profiles/preview/resources/mgmt/policy/models.go b/profiles/preview/resources/mgmt/policy/models.go
index 85b79b003622..8827d97f90d7 100644
--- a/profiles/preview/resources/mgmt/policy/models.go
+++ b/profiles/preview/resources/mgmt/policy/models.go
@@ -22,13 +22,20 @@ package policy
import (
"context"
- original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-01-01/policy"
+ original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
)
const (
DefaultBaseURI = original.DefaultBaseURI
)
+type EnforcementMode = original.EnforcementMode
+
+const (
+ Default EnforcementMode = original.Default
+ DoNotEnforce EnforcementMode = original.DoNotEnforce
+)
+
type ResourceIdentityType = original.ResourceIdentityType
const (
@@ -110,6 +117,9 @@ func NewSetDefinitionsClientWithBaseURI(baseURI string, subscriptionID string) S
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
return original.NewWithBaseURI(baseURI, subscriptionID)
}
+func PossibleEnforcementModeValues() []EnforcementMode {
+ return original.PossibleEnforcementModeValues()
+}
func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
return original.PossibleResourceIdentityTypeValues()
}
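
The policy profile now tracks the 2019-06-01 API and exposes the `EnforcementMode` type. A short sketch of enumerating its possible values, assuming the profile import path from the hunk above:

```go
package example

import (
	"fmt"

	policy "github.com/Azure/azure-sdk-for-go/profiles/preview/resources/mgmt/policy"
)

// listEnforcementModes prints the enforcement modes (Default and DoNotEnforce)
// surfaced by the updated policy profile.
func listEnforcementModes() {
	for _, mode := range policy.PossibleEnforcementModeValues() {
		fmt.Println(mode)
	}
}
```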
diff --git a/profiles/preview/resources/mgmt/policy/policyapi/models.go b/profiles/preview/resources/mgmt/policy/policyapi/models.go
index a4c9803aa76a..be7cec1ae017 100644
--- a/profiles/preview/resources/mgmt/policy/policyapi/models.go
+++ b/profiles/preview/resources/mgmt/policy/policyapi/models.go
@@ -19,7 +19,7 @@
package policyapi
-import original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-01-01/policy/policyapi"
+import original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy/policyapi"
type AssignmentsClientAPI = original.AssignmentsClientAPI
type DefinitionsClientAPI = original.DefinitionsClientAPI
diff --git a/profiles/preview/resources/mgmt/resources/models.go b/profiles/preview/resources/mgmt/resources/models.go
index b25109161d78..34bfcc4a4866 100644
--- a/profiles/preview/resources/mgmt/resources/models.go
+++ b/profiles/preview/resources/mgmt/resources/models.go
@@ -57,6 +57,7 @@ type AliasType = original.AliasType
type BaseClient = original.BaseClient
type BasicDependency = original.BasicDependency
type Client = original.Client
+type CloudError = original.CloudError
type CreateOrUpdateByIDFuture = original.CreateOrUpdateByIDFuture
type CreateOrUpdateFuture = original.CreateOrUpdateFuture
type DebugSetting = original.DebugSetting
@@ -86,6 +87,8 @@ type DeploymentsCreateOrUpdateFuture = original.DeploymentsCreateOrUpdateFuture
type DeploymentsDeleteAtManagementGroupScopeFuture = original.DeploymentsDeleteAtManagementGroupScopeFuture
type DeploymentsDeleteAtSubscriptionScopeFuture = original.DeploymentsDeleteAtSubscriptionScopeFuture
type DeploymentsDeleteFuture = original.DeploymentsDeleteFuture
+type ErrorAdditionalInfo = original.ErrorAdditionalInfo
+type ErrorResponse = original.ErrorResponse
type ExportTemplateRequest = original.ExportTemplateRequest
type GenericResource = original.GenericResource
type GenericResourceFilter = original.GenericResourceFilter
@@ -136,6 +139,7 @@ type TagsListResult = original.TagsListResult
type TagsListResultIterator = original.TagsListResultIterator
type TagsListResultPage = original.TagsListResultPage
type TargetResource = original.TargetResource
+type TemplateHashResult = original.TemplateHashResult
type TemplateLink = original.TemplateLink
type UpdateByIDFuture = original.UpdateByIDFuture
type UpdateFuture = original.UpdateFuture
diff --git a/profiles/preview/servicefabric/mgmt/servicefabric/models.go b/profiles/preview/servicefabric/mgmt/servicefabric/models.go
index 651ca35ef2dd..1646871573e3 100644
--- a/profiles/preview/servicefabric/mgmt/servicefabric/models.go
+++ b/profiles/preview/servicefabric/mgmt/servicefabric/models.go
@@ -36,6 +36,13 @@ const (
SharedProcess ArmServicePackageActivationMode = original.SharedProcess
)
+type ArmUpgradeFailureAction = original.ArmUpgradeFailureAction
+
+const (
+ Manual ArmUpgradeFailureAction = original.Manual
+ Rollback ArmUpgradeFailureAction = original.Rollback
+)
+
type ClusterState = original.ClusterState
const (
@@ -184,8 +191,8 @@ const (
type UpgradeMode = original.UpgradeMode
const (
- Automatic UpgradeMode = original.Automatic
- Manual UpgradeMode = original.Manual
+ UpgradeModeAutomatic UpgradeMode = original.UpgradeModeAutomatic
+ UpgradeModeManual UpgradeMode = original.UpgradeModeManual
)
type UpgradeMode1 = original.UpgradeMode1
@@ -236,16 +243,18 @@ type ApplicationTypeVersionResource = original.ApplicationTypeVersionResource
type ApplicationTypeVersionResourceList = original.ApplicationTypeVersionResourceList
type ApplicationTypeVersionResourceProperties = original.ApplicationTypeVersionResourceProperties
type ApplicationTypeVersionsClient = original.ApplicationTypeVersionsClient
-type ApplicationTypeVersionsCreateFuture = original.ApplicationTypeVersionsCreateFuture
+type ApplicationTypeVersionsCreateOrUpdateFuture = original.ApplicationTypeVersionsCreateOrUpdateFuture
type ApplicationTypeVersionsDeleteFuture = original.ApplicationTypeVersionsDeleteFuture
type ApplicationTypesClient = original.ApplicationTypesClient
type ApplicationTypesDeleteFuture = original.ApplicationTypesDeleteFuture
type ApplicationUpgradePolicy = original.ApplicationUpgradePolicy
type ApplicationsClient = original.ApplicationsClient
-type ApplicationsCreateFuture = original.ApplicationsCreateFuture
+type ApplicationsCreateOrUpdateFuture = original.ApplicationsCreateOrUpdateFuture
type ApplicationsDeleteFuture = original.ApplicationsDeleteFuture
type ApplicationsUpdateFuture = original.ApplicationsUpdateFuture
type ArmApplicationHealthPolicy = original.ArmApplicationHealthPolicy
+type ArmRollingUpgradeMonitoringPolicy = original.ArmRollingUpgradeMonitoringPolicy
+type ArmServiceTypeHealthPolicy = original.ArmServiceTypeHealthPolicy
type AvailableOperationDisplay = original.AvailableOperationDisplay
type AzureActiveDirectory = original.AzureActiveDirectory
type BaseClient = original.BaseClient
@@ -269,7 +278,7 @@ type ClusterUpgradePolicy = original.ClusterUpgradePolicy
type ClusterVersionDetails = original.ClusterVersionDetails
type ClusterVersionsClient = original.ClusterVersionsClient
type ClustersClient = original.ClustersClient
-type ClustersCreateFuture = original.ClustersCreateFuture
+type ClustersCreateOrUpdateFuture = original.ClustersCreateOrUpdateFuture
type ClustersUpdateFuture = original.ClustersUpdateFuture
type DiagnosticsStorageAccountConfig = original.DiagnosticsStorageAccountConfig
type EndpointRangeDescription = original.EndpointRangeDescription
@@ -285,7 +294,6 @@ type OperationsClient = original.OperationsClient
type PartitionSchemeDescription = original.PartitionSchemeDescription
type ProxyResource = original.ProxyResource
type Resource = original.Resource
-type RollingUpgradeMonitoringPolicy = original.RollingUpgradeMonitoringPolicy
type ServerCertificateCommonName = original.ServerCertificateCommonName
type ServerCertificateCommonNames = original.ServerCertificateCommonNames
type ServiceCorrelationDescription = original.ServiceCorrelationDescription
@@ -300,7 +308,7 @@ type ServiceResourceUpdateProperties = original.ServiceResourceUpdateProperties
type ServiceTypeDeltaHealthPolicy = original.ServiceTypeDeltaHealthPolicy
type ServiceTypeHealthPolicy = original.ServiceTypeHealthPolicy
type ServicesClient = original.ServicesClient
-type ServicesCreateFuture = original.ServicesCreateFuture
+type ServicesCreateOrUpdateFuture = original.ServicesCreateOrUpdateFuture
type ServicesDeleteFuture = original.ServicesDeleteFuture
type ServicesUpdateFuture = original.ServicesUpdateFuture
type SettingsParameterDescription = original.SettingsParameterDescription
@@ -369,6 +377,9 @@ func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
func PossibleArmServicePackageActivationModeValues() []ArmServicePackageActivationMode {
return original.PossibleArmServicePackageActivationModeValues()
}
+func PossibleArmUpgradeFailureActionValues() []ArmUpgradeFailureAction {
+ return original.PossibleArmUpgradeFailureActionValues()
+}
func PossibleClusterStateValues() []ClusterState {
return original.PossibleClusterStateValues()
}
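
In the servicefabric profile the `UpgradeMode` constants gain an `UpgradeMode` prefix, while the bare `Manual` name now belongs to the new `ArmUpgradeFailureAction` type. A sketch of code updated for the rename; the helper is illustrative only.

```go
package example

import (
	servicefabric "github.com/Azure/azure-sdk-for-go/profiles/preview/servicefabric/mgmt/servicefabric"
)

// isAutomaticUpgrade checks the renamed constant; code that previously compared
// against servicefabric.Automatic must use UpgradeModeAutomatic after this change.
func isAutomaticUpgrade(mode servicefabric.UpgradeMode) bool {
	return mode == servicefabric.UpgradeModeAutomatic
}
```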
diff --git a/profiles/preview/storage/mgmt/storage/models.go b/profiles/preview/storage/mgmt/storage/models.go
index 25cbf490e4b5..e95058a8ae40 100644
--- a/profiles/preview/storage/mgmt/storage/models.go
+++ b/profiles/preview/storage/mgmt/storage/models.go
@@ -296,6 +296,7 @@ type AzureFilesIdentityBasedAuthentication = original.AzureFilesIdentityBasedAut
type BaseClient = original.BaseClient
type BlobContainer = original.BlobContainer
type BlobContainersClient = original.BlobContainersClient
+type BlobServiceItems = original.BlobServiceItems
type BlobServiceProperties = original.BlobServiceProperties
type BlobServicePropertiesProperties = original.BlobServicePropertiesProperties
type BlobServicesClient = original.BlobServicesClient
@@ -312,6 +313,19 @@ type Encryption = original.Encryption
type EncryptionService = original.EncryptionService
type EncryptionServices = original.EncryptionServices
type Endpoints = original.Endpoints
+type ErrorResponse = original.ErrorResponse
+type ErrorResponseBody = original.ErrorResponseBody
+type FileServiceItems = original.FileServiceItems
+type FileServiceProperties = original.FileServiceProperties
+type FileServicePropertiesProperties = original.FileServicePropertiesProperties
+type FileServicesClient = original.FileServicesClient
+type FileShare = original.FileShare
+type FileShareItem = original.FileShareItem
+type FileShareItems = original.FileShareItems
+type FileShareItemsIterator = original.FileShareItemsIterator
+type FileShareItemsPage = original.FileShareItemsPage
+type FileShareProperties = original.FileShareProperties
+type FileSharesClient = original.FileSharesClient
type GeoReplicationStats = original.GeoReplicationStats
type IPRule = original.IPRule
type Identity = original.Identity
@@ -385,6 +399,24 @@ func NewBlobServicesClient(subscriptionID string) BlobServicesClient {
func NewBlobServicesClientWithBaseURI(baseURI string, subscriptionID string) BlobServicesClient {
return original.NewBlobServicesClientWithBaseURI(baseURI, subscriptionID)
}
+func NewFileServicesClient(subscriptionID string) FileServicesClient {
+ return original.NewFileServicesClient(subscriptionID)
+}
+func NewFileServicesClientWithBaseURI(baseURI string, subscriptionID string) FileServicesClient {
+ return original.NewFileServicesClientWithBaseURI(baseURI, subscriptionID)
+}
+func NewFileShareItemsIterator(page FileShareItemsPage) FileShareItemsIterator {
+ return original.NewFileShareItemsIterator(page)
+}
+func NewFileShareItemsPage(getNextPage func(context.Context, FileShareItems) (FileShareItems, error)) FileShareItemsPage {
+ return original.NewFileShareItemsPage(getNextPage)
+}
+func NewFileSharesClient(subscriptionID string) FileSharesClient {
+ return original.NewFileSharesClient(subscriptionID)
+}
+func NewFileSharesClientWithBaseURI(baseURI string, subscriptionID string) FileSharesClient {
+ return original.NewFileSharesClientWithBaseURI(baseURI, subscriptionID)
+}
func NewListContainerItemsIterator(page ListContainerItemsPage) ListContainerItemsIterator {
return original.NewListContainerItemsIterator(page)
}
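
The storage profile picks up the new file service and file share clients. A minimal construction sketch, assuming the profile import path shown above; the helper name is illustrative only.

```go
package example

import (
	storage "github.com/Azure/azure-sdk-for-go/profiles/preview/storage/mgmt/storage"
)

// newFileClients constructs the file service and file share clients added in
// this release; both constructors take only the subscription ID.
func newFileClients(subscriptionID string) (storage.FileServicesClient, storage.FileSharesClient) {
	return storage.NewFileServicesClient(subscriptionID), storage.NewFileSharesClient(subscriptionID)
}
```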
diff --git a/profiles/preview/storage/mgmt/storage/storageapi/models.go b/profiles/preview/storage/mgmt/storage/storageapi/models.go
index c11d9c8b5aa7..d9a8d0c91f8f 100644
--- a/profiles/preview/storage/mgmt/storage/storageapi/models.go
+++ b/profiles/preview/storage/mgmt/storage/storageapi/models.go
@@ -24,6 +24,8 @@ import original "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04
type AccountsClientAPI = original.AccountsClientAPI
type BlobContainersClientAPI = original.BlobContainersClientAPI
type BlobServicesClientAPI = original.BlobServicesClientAPI
+type FileServicesClientAPI = original.FileServicesClientAPI
+type FileSharesClientAPI = original.FileSharesClientAPI
type ManagementPoliciesClientAPI = original.ManagementPoliciesClientAPI
type OperationsClientAPI = original.OperationsClientAPI
type SkusClientAPI = original.SkusClientAPI
diff --git a/services/appinsights/mgmt/2015-05-01/insights/annotations.go b/services/appinsights/mgmt/2015-05-01/insights/annotations.go
index cc478c3a72ea..c6cbb32e7e91 100644
--- a/services/appinsights/mgmt/2015-05-01/insights/annotations.go
+++ b/services/appinsights/mgmt/2015-05-01/insights/annotations.go
@@ -137,13 +137,13 @@ func (client AnnotationsClient) CreateResponder(resp *http.Response) (result Lis
// resourceGroupName - the name of the resource group. The name is case insensitive.
// resourceName - the name of the Application Insights component resource.
// annotationID - the unique annotation ID. This is unique within an Application Insights component.
-func (client AnnotationsClient) Delete(ctx context.Context, resourceGroupName string, resourceName string, annotationID string) (result SetObject, err error) {
+func (client AnnotationsClient) Delete(ctx context.Context, resourceGroupName string, resourceName string, annotationID string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/AnnotationsClient.Delete")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.Response != nil {
+ sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -166,7 +166,7 @@ func (client AnnotationsClient) Delete(ctx context.Context, resourceGroupName st
resp, err := client.DeleteSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.Response = resp
err = autorest.NewErrorWithError(err, "insights.AnnotationsClient", "Delete", resp, "Failure sending request")
return
}
@@ -210,14 +210,13 @@ func (client AnnotationsClient) DeleteSender(req *http.Request) (*http.Response,
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
-func (client AnnotationsClient) DeleteResponder(resp *http.Response) (result SetObject, err error) {
+func (client AnnotationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
+ result.Response = resp
return
}
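
This is a breaking change for callers: `AnnotationsClient.Delete` (and `WorkItemConfigurationsClient.Delete` further below) now returns `autorest.Response` instead of the removed `SetObject` type. A hedged sketch of updated caller code; the wrapper function is illustrative only.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/appinsights/mgmt/2015-05-01/insights"
)

// deleteAnnotation adapts to the new signature: the HTTP status is read directly
// from the returned autorest.Response rather than from the removed SetObject wrapper.
func deleteAnnotation(ctx context.Context, client insights.AnnotationsClient, resourceGroup, component, annotationID string) (int, error) {
	result, err := client.Delete(ctx, resourceGroup, component, annotationID)
	if err != nil {
		return 0, err
	}
	return result.StatusCode, nil
}
```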
diff --git a/services/appinsights/mgmt/2015-05-01/insights/insightsapi/interfaces.go b/services/appinsights/mgmt/2015-05-01/insights/insightsapi/interfaces.go
index e7d2c7ad86a7..38f48876d203 100644
--- a/services/appinsights/mgmt/2015-05-01/insights/insightsapi/interfaces.go
+++ b/services/appinsights/mgmt/2015-05-01/insights/insightsapi/interfaces.go
@@ -33,7 +33,7 @@ var _ OperationsClientAPI = (*insights.OperationsClient)(nil)
// AnnotationsClientAPI contains the set of methods on the AnnotationsClient type.
type AnnotationsClientAPI interface {
Create(ctx context.Context, resourceGroupName string, resourceName string, annotationProperties insights.Annotation) (result insights.ListAnnotation, err error)
- Delete(ctx context.Context, resourceGroupName string, resourceName string, annotationID string) (result insights.SetObject, err error)
+ Delete(ctx context.Context, resourceGroupName string, resourceName string, annotationID string) (result autorest.Response, err error)
Get(ctx context.Context, resourceGroupName string, resourceName string, annotationID string) (result insights.ListAnnotation, err error)
List(ctx context.Context, resourceGroupName string, resourceName string, start string, end string) (result insights.AnnotationsListResult, err error)
}
@@ -116,7 +116,7 @@ var _ ComponentsClientAPI = (*insights.ComponentsClient)(nil)
// WorkItemConfigurationsClientAPI contains the set of methods on the WorkItemConfigurationsClient type.
type WorkItemConfigurationsClientAPI interface {
Create(ctx context.Context, resourceGroupName string, resourceName string, workItemConfigurationProperties insights.WorkItemCreateConfiguration) (result insights.WorkItemConfiguration, err error)
- Delete(ctx context.Context, resourceGroupName string, resourceName string, workItemConfigID string) (result insights.SetObject, err error)
+ Delete(ctx context.Context, resourceGroupName string, resourceName string, workItemConfigID string) (result autorest.Response, err error)
GetDefault(ctx context.Context, resourceGroupName string, resourceName string) (result insights.WorkItemConfiguration, err error)
GetItem(ctx context.Context, resourceGroupName string, resourceName string, workItemConfigID string) (result insights.WorkItemConfiguration, err error)
List(ctx context.Context, resourceGroupName string, resourceName string) (result insights.WorkItemConfigurationsListResult, err error)
diff --git a/services/appinsights/mgmt/2015-05-01/insights/models.go b/services/appinsights/mgmt/2015-05-01/insights/models.go
index 93a0f7d8dc9e..3c63e3253c28 100644
--- a/services/appinsights/mgmt/2015-05-01/insights/models.go
+++ b/services/appinsights/mgmt/2015-05-01/insights/models.go
@@ -1197,12 +1197,6 @@ func NewOperationListResultPage(getNextPage func(context.Context, OperationListR
return OperationListResultPage{fn: getNextPage}
}
-// SetObject ...
-type SetObject struct {
- autorest.Response `json:"-"`
- Value interface{} `json:"value,omitempty"`
-}
-
// TagsResource a container holding only the Tags for a resource, allowing the user to update the tags on a
// WebTest instance.
type TagsResource struct {
diff --git a/services/appinsights/mgmt/2015-05-01/insights/workitemconfigurations.go b/services/appinsights/mgmt/2015-05-01/insights/workitemconfigurations.go
index e445f49da908..709f900b6ba5 100644
--- a/services/appinsights/mgmt/2015-05-01/insights/workitemconfigurations.go
+++ b/services/appinsights/mgmt/2015-05-01/insights/workitemconfigurations.go
@@ -138,13 +138,13 @@ func (client WorkItemConfigurationsClient) CreateResponder(resp *http.Response)
// resourceName - the name of the Application Insights component resource.
// workItemConfigID - the unique work item configuration Id. This can be either the friendly name of the connector as
// defined in the connector configuration
-func (client WorkItemConfigurationsClient) Delete(ctx context.Context, resourceGroupName string, resourceName string, workItemConfigID string) (result SetObject, err error) {
+func (client WorkItemConfigurationsClient) Delete(ctx context.Context, resourceGroupName string, resourceName string, workItemConfigID string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/WorkItemConfigurationsClient.Delete")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.Response != nil {
+ sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
@@ -167,7 +167,7 @@ func (client WorkItemConfigurationsClient) Delete(ctx context.Context, resourceG
resp, err := client.DeleteSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.Response = resp
err = autorest.NewErrorWithError(err, "insights.WorkItemConfigurationsClient", "Delete", resp, "Failure sending request")
return
}
@@ -211,14 +211,13 @@ func (client WorkItemConfigurationsClient) DeleteSender(req *http.Request) (*htt
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
-func (client WorkItemConfigurationsClient) DeleteResponder(resp *http.Response) (result SetObject, err error) {
+func (client WorkItemConfigurationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result.Value),
autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
+ result.Response = resp
return
}
diff --git a/services/batch/2019-08-01.10.0/batch/account.go b/services/batch/2019-08-01.10.0/batch/account.go
new file mode 100644
index 000000000000..2528c7be5a23
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/account.go
@@ -0,0 +1,356 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "github.com/satori/go.uuid"
+ "net/http"
+)
+
+// AccountClient is a client for issuing REST requests to the Azure Batch service.
+type AccountClient struct {
+ BaseClient
+}
+
+// NewAccountClient creates an instance of the AccountClient client.
+func NewAccountClient(batchURL string) AccountClient {
+ return AccountClient{New(batchURL)}
+}
+
+// ListPoolNodeCounts gets the number of Compute Nodes in each state, grouped by Pool.
+// Parameters:
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch.
+// maxResults - the maximum number of items to return in the response.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client AccountClient) ListPoolNodeCounts(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result PoolNodeCountsListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.ListPoolNodeCounts")
+ defer func() {
+ sc := -1
+ if result.pnclr.Response.Response != nil {
+ sc = result.pnclr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(10), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.AccountClient", "ListPoolNodeCounts", err.Error())
+ }
+
+ result.fn = client.listPoolNodeCountsNextResults
+ req, err := client.ListPoolNodeCountsPreparer(ctx, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListPoolNodeCounts", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListPoolNodeCountsSender(req)
+ if err != nil {
+ result.pnclr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListPoolNodeCounts", resp, "Failure sending request")
+ return
+ }
+
+ result.pnclr, err = client.ListPoolNodeCountsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListPoolNodeCounts", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPoolNodeCountsPreparer prepares the ListPoolNodeCounts request.
+func (client AccountClient) ListPoolNodeCountsPreparer(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 10)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/nodecounts"),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListPoolNodeCountsSender sends the ListPoolNodeCounts request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountClient) ListPoolNodeCountsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListPoolNodeCountsResponder handles the response to the ListPoolNodeCounts request. The method always
+// closes the http.Response Body.
+func (client AccountClient) ListPoolNodeCountsResponder(resp *http.Response) (result PoolNodeCountsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listPoolNodeCountsNextResults retrieves the next set of results, if any.
+func (client AccountClient) listPoolNodeCountsNextResults(ctx context.Context, lastResults PoolNodeCountsListResult) (result PoolNodeCountsListResult, err error) {
+ req, err := lastResults.poolNodeCountsListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listPoolNodeCountsNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListPoolNodeCountsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listPoolNodeCountsNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListPoolNodeCountsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "listPoolNodeCountsNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListPoolNodeCountsComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AccountClient) ListPoolNodeCountsComplete(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result PoolNodeCountsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.ListPoolNodeCounts")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListPoolNodeCounts(ctx, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
+
+// ListSupportedImages sends the list supported images request.
+// Parameters:
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 results will be
+// returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client AccountClient) ListSupportedImages(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result AccountListSupportedImagesResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.ListSupportedImages")
+ defer func() {
+ sc := -1
+ if result.alsir.Response.Response != nil {
+ sc = result.alsir.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.AccountClient", "ListSupportedImages", err.Error())
+ }
+
+ result.fn = client.listSupportedImagesNextResults
+ req, err := client.ListSupportedImagesPreparer(ctx, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListSupportedImages", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSupportedImagesSender(req)
+ if err != nil {
+ result.alsir.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListSupportedImages", resp, "Failure sending request")
+ return
+ }
+
+ result.alsir, err = client.ListSupportedImagesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListSupportedImages", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListSupportedImagesPreparer prepares the ListSupportedImages request.
+func (client AccountClient) ListSupportedImagesPreparer(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/supportedimages"),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSupportedImagesSender sends the ListSupportedImages request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountClient) ListSupportedImagesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListSupportedImagesResponder handles the response to the ListSupportedImages request. The method always
+// closes the http.Response Body.
+func (client AccountClient) ListSupportedImagesResponder(resp *http.Response) (result AccountListSupportedImagesResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listSupportedImagesNextResults retrieves the next set of results, if any.
+func (client AccountClient) listSupportedImagesNextResults(ctx context.Context, lastResults AccountListSupportedImagesResult) (result AccountListSupportedImagesResult, err error) {
+ req, err := lastResults.accountListSupportedImagesResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listSupportedImagesNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSupportedImagesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listSupportedImagesNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListSupportedImagesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "listSupportedImagesNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListSupportedImagesComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AccountClient) ListSupportedImagesComplete(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result AccountListSupportedImagesResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.ListSupportedImages")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListSupportedImages(ctx, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
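As a quick orientation for the new client, here is a hedged enumeration sketch. The account endpoint is a placeholder, the autorest.Authorizer (AAD token or Batch shared key) is assumed to be supplied by the caller, and all optional parameters are left nil so the server-side defaults shown in the preparer above apply:

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
	"github.com/Azure/go-autorest/autorest"
)

// listSupportedImages pages through /supportedimages using the Complete
// iterator, which crosses page boundaries automatically.
func listSupportedImages(ctx context.Context, authorizer autorest.Authorizer) error {
	client := batch.NewAccountClient("https://myaccount.westus2.batch.azure.com") // placeholder endpoint
	client.Authorizer = authorizer

	iter, err := client.ListSupportedImagesComplete(ctx, "", nil, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```

ListPoolNodeCountsComplete follows the same pattern against /nodecounts.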
diff --git a/services/batch/2019-08-01.10.0/batch/application.go b/services/batch/2019-08-01.10.0/batch/application.go
new file mode 100644
index 000000000000..b7c2951cf635
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/application.go
@@ -0,0 +1,302 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "github.com/satori/go.uuid"
+ "net/http"
+)
+
+// ApplicationClient is a client for issuing REST requests to the Azure Batch service.
+type ApplicationClient struct {
+ BaseClient
+}
+
+// NewApplicationClient creates an instance of the ApplicationClient client.
+func NewApplicationClient(batchURL string) ApplicationClient {
+ return ApplicationClient{New(batchURL)}
+}
+
+// Get this operation returns only Applications and versions that are available for use on Compute Nodes; that is, that
+// can be used in a Package reference. For administrator information about Applications and versions that are not yet
+// available to Compute Nodes, use the Azure portal or the Azure Resource Manager API.
+// Parameters:
+// applicationID - the ID of the Application.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ApplicationClient) Get(ctx context.Context, applicationID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ApplicationSummary, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, applicationID, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ApplicationClient) GetPreparer(ctx context.Context, applicationID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "applicationId": autorest.Encode("path", applicationID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/applications/{applicationId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ApplicationClient) GetResponder(resp *http.Response) (result ApplicationSummary, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List this operation returns only Applications and versions that are available for use on Compute Nodes; that is,
+// that can be used in a Package reference. For administrator information about Applications and versions that are not
+// yet available to Compute Nodes, use the Azure portal or the Azure Resource Manager API.
+// Parameters:
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 applications can be
+// returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ApplicationClient) List(ctx context.Context, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ApplicationListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.List")
+ defer func() {
+ sc := -1
+ if result.alr.Response.Response != nil {
+ sc = result.alr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.ApplicationClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.alr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.alr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ApplicationClient) ListPreparer(ctx context.Context, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/applications"),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ApplicationClient) ListResponder(resp *http.Response) (result ApplicationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client ApplicationClient) listNextResults(ctx context.Context, lastResults ApplicationListResult) (result ApplicationListResult, err error) {
+ req, err := lastResults.applicationListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.ApplicationClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.ApplicationClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ApplicationClient) ListComplete(ctx context.Context, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ApplicationListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
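A similarly hedged sketch for ApplicationClient.Get; the account URL and application ID are placeholders, the authorizer is assumed to be configured by the caller, and the nil optional parameters fall back to the defaults applied in GetPreparer above:

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
	"github.com/Azure/go-autorest/autorest"
)

// getApplication fetches a single ApplicationSummary by its ID.
func getApplication(ctx context.Context, authorizer autorest.Authorizer) error {
	client := batch.NewApplicationClient("https://myaccount.westus2.batch.azure.com") // placeholder endpoint
	client.Authorizer = authorizer

	summary, err := client.Get(ctx, "my-app-id", nil, nil, nil, nil) // "my-app-id" is a placeholder
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", summary)
	return nil
}
```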
diff --git a/services/batch/2019-08-01.10.0/batch/batchapi/interfaces.go b/services/batch/2019-08-01.10.0/batch/batchapi/interfaces.go
new file mode 100644
index 000000000000..210ca82a278a
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/batchapi/interfaces.go
@@ -0,0 +1,156 @@
+package batchapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/satori/go.uuid"
+)
+
+// ApplicationClientAPI contains the set of methods on the ApplicationClient type.
+type ApplicationClientAPI interface {
+ Get(ctx context.Context, applicationID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ApplicationSummary, err error)
+ List(ctx context.Context, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ApplicationListResultPage, err error)
+}
+
+var _ ApplicationClientAPI = (*batch.ApplicationClient)(nil)
+
+// PoolClientAPI contains the set of methods on the PoolClient type.
+type PoolClientAPI interface {
+ Add(ctx context.Context, pool batch.PoolAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ Delete(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ DisableAutoScale(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ EnableAutoScale(ctx context.Context, poolID string, poolEnableAutoScaleParameter batch.PoolEnableAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ EvaluateAutoScale(ctx context.Context, poolID string, poolEvaluateAutoScaleParameter batch.PoolEvaluateAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.AutoScaleRun, err error)
+ Exists(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Get(ctx context.Context, poolID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.CloudPool, err error)
+ GetAllLifetimeStatistics(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.PoolStatistics, err error)
+ List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudPoolListResultPage, err error)
+ ListUsageMetrics(ctx context.Context, startTime *date.Time, endTime *date.Time, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.PoolListUsageMetricsResultPage, err error)
+ Patch(ctx context.Context, poolID string, poolPatchParameter batch.PoolPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ RemoveNodes(ctx context.Context, poolID string, nodeRemoveParameter batch.NodeRemoveParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Resize(ctx context.Context, poolID string, poolResizeParameter batch.PoolResizeParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ StopResize(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ UpdateProperties(ctx context.Context, poolID string, poolUpdatePropertiesParameter batch.PoolUpdatePropertiesParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+}
+
+var _ PoolClientAPI = (*batch.PoolClient)(nil)
+
+// AccountClientAPI contains the set of methods on the AccountClient type.
+type AccountClientAPI interface {
+ ListPoolNodeCounts(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.PoolNodeCountsListResultPage, err error)
+ ListSupportedImages(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.AccountListSupportedImagesResultPage, err error)
+}
+
+var _ AccountClientAPI = (*batch.AccountClient)(nil)
+
+// JobClientAPI contains the set of methods on the JobClient type.
+type JobClientAPI interface {
+ Add(ctx context.Context, job batch.JobAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ Delete(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Disable(ctx context.Context, jobID string, jobDisableParameter batch.JobDisableParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Enable(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Get(ctx context.Context, jobID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.CloudJob, err error)
+ GetAllLifetimeStatistics(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.JobStatistics, err error)
+ GetTaskCounts(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.TaskCounts, err error)
+ List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudJobListResultPage, err error)
+ ListFromJobSchedule(ctx context.Context, jobScheduleID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudJobListResultPage, err error)
+ ListPreparationAndReleaseTaskStatus(ctx context.Context, jobID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudJobListPreparationAndReleaseTaskStatusResultPage, err error)
+ Patch(ctx context.Context, jobID string, jobPatchParameter batch.JobPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Terminate(ctx context.Context, jobID string, jobTerminateParameter *batch.JobTerminateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Update(ctx context.Context, jobID string, jobUpdateParameter batch.JobUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+}
+
+var _ JobClientAPI = (*batch.JobClient)(nil)
+
+// CertificateClientAPI contains the set of methods on the CertificateClient type.
+type CertificateClientAPI interface {
+ Add(ctx context.Context, certificate batch.CertificateAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ CancelDeletion(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ Delete(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ Get(ctx context.Context, thumbprintAlgorithm string, thumbprint string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.Certificate, err error)
+ List(ctx context.Context, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CertificateListResultPage, err error)
+}
+
+var _ CertificateClientAPI = (*batch.CertificateClient)(nil)
+
+// FileClientAPI contains the set of methods on the FileClient type.
+type FileClientAPI interface {
+ DeleteFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ DeleteFromTask(ctx context.Context, jobID string, taskID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ GetFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.ReadCloser, err error)
+ GetFromTask(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.ReadCloser, err error)
+ GetPropertiesFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ GetPropertiesFromTask(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ ListFromComputeNode(ctx context.Context, poolID string, nodeID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.NodeFileListResultPage, err error)
+ ListFromTask(ctx context.Context, jobID string, taskID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.NodeFileListResultPage, err error)
+}
+
+var _ FileClientAPI = (*batch.FileClient)(nil)
+
+// JobScheduleClientAPI contains the set of methods on the JobScheduleClient type.
+type JobScheduleClientAPI interface {
+ Add(ctx context.Context, cloudJobSchedule batch.JobScheduleAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ Delete(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Disable(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Enable(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Exists(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Get(ctx context.Context, jobScheduleID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.CloudJobSchedule, err error)
+ List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudJobScheduleListResultPage, err error)
+ Patch(ctx context.Context, jobScheduleID string, jobSchedulePatchParameter batch.JobSchedulePatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Terminate(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Update(ctx context.Context, jobScheduleID string, jobScheduleUpdateParameter batch.JobScheduleUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+}
+
+var _ JobScheduleClientAPI = (*batch.JobScheduleClient)(nil)
+
+// TaskClientAPI contains the set of methods on the TaskClient type.
+type TaskClientAPI interface {
+ Add(ctx context.Context, jobID string, task batch.TaskAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ AddCollection(ctx context.Context, jobID string, taskCollection batch.TaskAddCollectionParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.TaskAddCollectionResult, err error)
+ Delete(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Get(ctx context.Context, jobID string, taskID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result batch.CloudTask, err error)
+ List(ctx context.Context, jobID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudTaskListResultPage, err error)
+ ListSubtasks(ctx context.Context, jobID string, taskID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.CloudTaskListSubtasksResult, err error)
+ Reactivate(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Terminate(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+ Update(ctx context.Context, jobID string, taskID string, taskUpdateParameter batch.TaskUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error)
+}
+
+var _ TaskClientAPI = (*batch.TaskClient)(nil)
+
+// ComputeNodeClientAPI contains the set of methods on the ComputeNodeClient type.
+type ComputeNodeClientAPI interface {
+ AddUser(ctx context.Context, poolID string, nodeID string, userParameter batch.ComputeNodeUser, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ DeleteUser(ctx context.Context, poolID string, nodeID string, userName string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ DisableScheduling(ctx context.Context, poolID string, nodeID string, nodeDisableSchedulingParameter *batch.NodeDisableSchedulingParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ EnableScheduling(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ Get(ctx context.Context, poolID string, nodeID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ComputeNode, err error)
+ GetRemoteDesktop(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ReadCloser, err error)
+ GetRemoteLoginSettings(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ComputeNodeGetRemoteLoginSettingsResult, err error)
+ List(ctx context.Context, poolID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.ComputeNodeListResultPage, err error)
+ Reboot(ctx context.Context, poolID string, nodeID string, nodeRebootParameter *batch.NodeRebootParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ Reimage(ctx context.Context, poolID string, nodeID string, nodeReimageParameter *batch.NodeReimageParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ UpdateUser(ctx context.Context, poolID string, nodeID string, userName string, nodeUpdateUserParameter batch.NodeUpdateUserParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error)
+ UploadBatchServiceLogs(ctx context.Context, poolID string, nodeID string, uploadBatchServiceLogsConfiguration batch.UploadBatchServiceLogsConfiguration, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result batch.UploadBatchServiceLogsResult, err error)
+}
+
+var _ ComputeNodeClientAPI = (*batch.ComputeNodeClient)(nil)
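These interfaces let consuming code depend on the API surface rather than the concrete clients, which makes test doubles straightforward. A hedged sketch, where fakeAccountClient is a hypothetical stub returning zero-value pages:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch/batchapi"
	"github.com/Azure/go-autorest/autorest/date"
	"github.com/satori/go.uuid"
)

// countNodes depends only on the interface, so it accepts the real
// batch.AccountClient or any test fake.
func countNodes(ctx context.Context, c batchapi.AccountClientAPI) error {
	_, err := c.ListPoolNodeCounts(ctx, "", nil, nil, nil, nil, nil)
	return err
}

// fakeAccountClient is a hypothetical stub for unit tests.
type fakeAccountClient struct{}

func (fakeAccountClient) ListPoolNodeCounts(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (batch.PoolNodeCountsListResultPage, error) {
	return batch.PoolNodeCountsListResultPage{}, nil
}

func (fakeAccountClient) ListSupportedImages(ctx context.Context, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (batch.AccountListSupportedImagesResultPage, error) {
	return batch.AccountListSupportedImagesResultPage{}, nil
}

// Compile-time check that the stub satisfies the interface.
var _ batchapi.AccountClientAPI = fakeAccountClient{}
```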
diff --git a/services/batch/2019-08-01.10.0/batch/certificate.go b/services/batch/2019-08-01.10.0/batch/certificate.go
new file mode 100644
index 000000000000..97d09f211e2a
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/certificate.go
@@ -0,0 +1,645 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "github.com/satori/go.uuid"
+ "net/http"
+)
+
+// CertificateClient is a client for issuing REST requests to the Azure Batch service.
+type CertificateClient struct {
+ BaseClient
+}
+
+// NewCertificateClient creates an instance of the CertificateClient client.
+func NewCertificateClient(batchURL string) CertificateClient {
+ return CertificateClient{New(batchURL)}
+}
+
+// Add sends the add request.
+// Parameters:
+// certificate - the Certificate to be added.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client CertificateClient) Add(ctx context.Context, certificate CertificateAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.Add")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: certificate,
+ Constraints: []validation.Constraint{{Target: "certificate.Thumbprint", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "certificate.ThumbprintAlgorithm", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "certificate.Data", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.CertificateClient", "Add", err.Error())
+ }
+
+ req, err := client.AddPreparer(ctx, certificate, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Add", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.AddSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Add", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.AddResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Add", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// AddPreparer prepares the Add request.
+func (client CertificateClient) AddPreparer(ctx context.Context, certificate CertificateAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/certificates"),
+ autorest.WithJSON(certificate),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AddSender sends the Add request. The method will close the
+// http.Response Body if it receives an error.
+func (client CertificateClient) AddSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// AddResponder handles the response to the Add request. The method always
+// closes the http.Response Body.
+func (client CertificateClient) AddResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// CancelDeletion if you try to delete a Certificate that is being used by a Pool or Compute Node, the status of the
+// Certificate changes to deleteFailed. If you decide that you want to continue using the Certificate, you can use this
+// operation to set the status of the Certificate back to active. If you intend to delete the Certificate, you do not
+// need to run this operation after the deletion failed. You must make sure that the Certificate is not being used by
+// any resources, and then you can try again to delete the Certificate.
+// Parameters:
+// thumbprintAlgorithm - the algorithm used to derive the thumbprint parameter. This must be sha1.
+// thumbprint - the thumbprint of the Certificate being deleted.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client CertificateClient) CancelDeletion(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.CancelDeletion")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CancelDeletionPreparer(ctx, thumbprintAlgorithm, thumbprint, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "CancelDeletion", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CancelDeletionSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "CancelDeletion", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CancelDeletionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "CancelDeletion", resp, "Failure responding to request")
+ }
+
+ return
+}
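+
+// exampleRevertFailedDelete is an illustrative sketch, not generated code: if an earlier delete
+// left a Certificate in the deleteFailed state and it is still needed, CancelDeletion sets its
+// status back to active. The thumbprint value is a placeholder.
+func exampleRevertFailedDelete(ctx context.Context, client CertificateClient) error {
+	const thumbprint = "0123456789abcdef0123456789abcdef01234567" // placeholder SHA-1 thumbprint
+	_, err := client.CancelDeletion(ctx, "sha1", thumbprint, nil, nil, nil, nil)
+	return err
+}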
+
+// CancelDeletionPreparer prepares the CancelDeletion request.
+func (client CertificateClient) CancelDeletionPreparer(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "thumbprint": autorest.Encode("path", thumbprint),
+ "thumbprintAlgorithm": autorest.Encode("path", thumbprintAlgorithm),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CancelDeletionSender sends the CancelDeletion request. The method will close the
+// http.Response Body if it receives an error.
+func (client CertificateClient) CancelDeletionSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CancelDeletionResponder handles the response to the CancelDeletion request. The method always
+// closes the http.Response Body.
+func (client CertificateClient) CancelDeletionResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Delete you cannot delete a Certificate if a resource (Pool or Compute Node) is using it. Before you can delete a
+// Certificate, you must therefore make sure that the Certificate is not associated with any existing Pools, the
+// Certificate is not installed on any Nodes (even if you remove a Certificate from a Pool, it is not removed from
+// existing Compute Nodes in that Pool until they restart), and no running Tasks depend on the Certificate. If you try
+// to delete a Certificate that is in use, the deletion fails. The Certificate status changes to deleteFailed. You can
+// use Cancel Delete Certificate to set the status back to active if you decide that you want to continue using the
+// Certificate.
+// Parameters:
+// thumbprintAlgorithm - the algorithm used to derive the thumbprint parameter. This must be sha1.
+// thumbprint - the thumbprint of the Certificate to be deleted.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client CertificateClient) Delete(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, thumbprintAlgorithm, thumbprint, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client CertificateClient) DeletePreparer(ctx context.Context, thumbprintAlgorithm string, thumbprint string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "thumbprint": autorest.Encode("path", thumbprint),
+ "thumbprintAlgorithm": autorest.Encode("path", thumbprintAlgorithm),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client CertificateClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client CertificateClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets information about the specified Certificate.
+// Parameters:
+// thumbprintAlgorithm - the algorithm used to derive the thumbprint parameter. This must be sha1.
+// thumbprint - the thumbprint of the Certificate to get.
+// selectParameter - an OData $select clause.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client CertificateClient) Get(ctx context.Context, thumbprintAlgorithm string, thumbprint string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result Certificate, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, thumbprintAlgorithm, thumbprint, selectParameter, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
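+
+// exampleGetCertificate is an illustrative sketch, not generated code: it fetches a single
+// Certificate and uses the OData $select clause to restrict the properties returned. The
+// thumbprint and the selected property names are placeholders.
+func exampleGetCertificate(ctx context.Context, client CertificateClient) (Certificate, error) {
+	const thumbprint = "0123456789abcdef0123456789abcdef01234567" // placeholder SHA-1 thumbprint
+	return client.Get(ctx, "sha1", thumbprint, "thumbprint,state", nil, nil, nil, nil)
+}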
+
+// GetPreparer prepares the Get request.
+func (client CertificateClient) GetPreparer(ctx context.Context, thumbprintAlgorithm string, thumbprint string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "thumbprint": autorest.Encode("path", thumbprint),
+ "thumbprintAlgorithm": autorest.Encode("path", thumbprintAlgorithm),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client CertificateClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client CertificateClient) GetResponder(resp *http.Response) (result Certificate, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List sends the list request.
+// Parameters:
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates.
+// selectParameter - an OData $select clause.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 Certificates can be
+// returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client CertificateClient) List(ctx context.Context, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CertificateListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.List")
+ defer func() {
+ sc := -1
+ if result.clr.Response.Response != nil {
+ sc = result.clr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.CertificateClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.clr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.clr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client CertificateClient) ListPreparer(ctx context.Context, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/certificates"),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client CertificateClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client CertificateClient) ListResponder(resp *http.Response) (result CertificateListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client CertificateClient) listNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) {
+ req, err := lastResults.certificateListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.CertificateClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.CertificateClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client CertificateClient) ListComplete(ctx context.Context, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CertificateListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
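+
+// exampleListAllCertificates is an illustrative sketch, not generated code: it walks every
+// Certificate in the account with ListComplete, which crosses page boundaries automatically. The
+// iterator method names (NotDone, Value, NextWithContext) follow the conventions of the pagers
+// generated for this package.
+func exampleListAllCertificates(ctx context.Context, client CertificateClient) ([]Certificate, error) {
+	var certs []Certificate
+	it, err := client.ListComplete(ctx, "", "", nil, nil, nil, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	for it.NotDone() {
+		certs = append(certs, it.Value())
+		if err := it.NextWithContext(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return certs, nil
+}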
diff --git a/services/batch/2019-08-01.10.0/batch/client.go b/services/batch/2019-08-01.10.0/batch/client.go
new file mode 100644
index 000000000000..7373e32ccece
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/client.go
@@ -0,0 +1,44 @@
+// Package batch implements the Azure ARM Batch service API version 2019-08-01.10.0.
+//
+// A client for issuing REST requests to the Azure Batch service.
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// BaseClient is the base client for Batch.
+type BaseClient struct {
+ autorest.Client
+ BatchURL string
+}
+
+// New creates an instance of the BaseClient client.
+func New(batchURL string) BaseClient {
+ return NewWithoutDefaults(batchURL)
+}
+
+// NewWithoutDefaults creates an instance of the BaseClient client.
+func NewWithoutDefaults(batchURL string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BatchURL: batchURL,
+ }
+}
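+
+// exampleNewCertificateClient is an illustrative sketch, not generated code: the Batch account URL
+// below is a placeholder, and authorizer stands for any autorest.Authorizer obtained elsewhere
+// (for example from an AAD token or shared-key credential).
+func exampleNewCertificateClient(authorizer autorest.Authorizer) CertificateClient {
+	client := NewCertificateClient("https://myaccount.myregion.batch.azure.com")
+	client.Authorizer = authorizer
+	return client
+}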
diff --git a/services/batch/2019-08-01.10.0/batch/computenode.go b/services/batch/2019-08-01.10.0/batch/computenode.go
new file mode 100644
index 000000000000..046e83ecc502
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/computenode.go
@@ -0,0 +1,1433 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "github.com/satori/go.uuid"
+ "net/http"
+)
+
+// ComputeNodeClient is a client for issuing REST requests to the Azure Batch service.
+type ComputeNodeClient struct {
+ BaseClient
+}
+
+// NewComputeNodeClient creates an instance of the ComputeNodeClient client.
+func NewComputeNodeClient(batchURL string) ComputeNodeClient {
+ return ComputeNodeClient{New(batchURL)}
+}
+
+// AddUser you can add a user Account to a Compute Node only when it is in the idle or running state.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the machine on which you want to create a user Account.
+// userParameter - the user Account to be created.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) AddUser(ctx context.Context, poolID string, nodeID string, userParameter ComputeNodeUser, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.AddUser")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: userParameter,
+ Constraints: []validation.Constraint{{Target: "userParameter.Name", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ComputeNodeClient", "AddUser", err.Error())
+ }
+
+ req, err := client.AddUserPreparer(ctx, poolID, nodeID, userParameter, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "AddUser", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.AddUserSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "AddUser", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.AddUserResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "AddUser", resp, "Failure responding to request")
+ }
+
+ return
+}
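+
+// exampleAddNodeUser is an illustrative sketch, not generated code: ComputeNodeUser.Name is the
+// only field required by the client-side validation in AddUser; the pool ID, node ID and
+// credential values below are placeholders.
+func exampleAddNodeUser(ctx context.Context, client ComputeNodeClient) error {
+	name := "exampleuser"         // placeholder user Account name
+	password := "examplePassw0rd" // placeholder password
+	user := ComputeNodeUser{Name: &name, Password: &password}
+	_, err := client.AddUser(ctx, "examplepool", "examplenode", user, nil, nil, nil, nil)
+	return err
+}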
+
+// AddUserPreparer prepares the AddUser request.
+func (client ComputeNodeClient) AddUserPreparer(ctx context.Context, poolID string, nodeID string, userParameter ComputeNodeUser, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/users", pathParameters),
+ autorest.WithJSON(userParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AddUserSender sends the AddUser request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) AddUserSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// AddUserResponder handles the response to the AddUser request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) AddUserResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// DeleteUser you can delete a user Account from a Compute Node only when it is in the idle or running state.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the machine on which you want to delete a user Account.
+// userName - the name of the user Account to delete.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) DeleteUser(ctx context.Context, poolID string, nodeID string, userName string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.DeleteUser")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeleteUserPreparer(ctx, poolID, nodeID, userName, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DeleteUser", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteUserSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DeleteUser", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteUserResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DeleteUser", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeleteUserPreparer prepares the DeleteUser request.
+func (client ComputeNodeClient) DeleteUserPreparer(ctx context.Context, poolID string, nodeID string, userName string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ "userName": autorest.Encode("path", userName),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/users/{userName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteUserSender sends the DeleteUser request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) DeleteUserSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteUserResponder handles the response to the DeleteUser request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) DeleteUserResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// DisableScheduling you can disable Task scheduling on a Compute Node only if its current scheduling state is enabled.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node on which you want to disable Task scheduling.
+// nodeDisableSchedulingParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) DisableScheduling(ctx context.Context, poolID string, nodeID string, nodeDisableSchedulingParameter *NodeDisableSchedulingParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.DisableScheduling")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DisableSchedulingPreparer(ctx, poolID, nodeID, nodeDisableSchedulingParameter, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DisableScheduling", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DisableSchedulingSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DisableScheduling", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DisableSchedulingResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "DisableScheduling", resp, "Failure responding to request")
+ }
+
+ return
+}
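+
+// exampleDisableScheduling is an illustrative sketch, not generated code: the request body is
+// optional (see DisableSchedulingPreparer below), so nil is passed for
+// nodeDisableSchedulingParameter and the service applies its default handling of running Tasks.
+// The pool and node IDs are placeholders.
+func exampleDisableScheduling(ctx context.Context, client ComputeNodeClient) error {
+	_, err := client.DisableScheduling(ctx, "examplepool", "examplenode", nil, nil, nil, nil, nil)
+	return err
+}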
+
+// DisableSchedulingPreparer prepares the DisableScheduling request.
+func (client ComputeNodeClient) DisableSchedulingPreparer(ctx context.Context, poolID string, nodeID string, nodeDisableSchedulingParameter *NodeDisableSchedulingParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/disablescheduling", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if nodeDisableSchedulingParameter != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(nodeDisableSchedulingParameter))
+ }
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DisableSchedulingSender sends the DisableScheduling request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) DisableSchedulingSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DisableSchedulingResponder handles the response to the DisableScheduling request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) DisableSchedulingResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// EnableScheduling you can enable Task scheduling on a Compute Node only if its current scheduling state is disabled.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node on which you want to enable Task scheduling.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) EnableScheduling(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.EnableScheduling")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.EnableSchedulingPreparer(ctx, poolID, nodeID, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "EnableScheduling", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.EnableSchedulingSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "EnableScheduling", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.EnableSchedulingResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "EnableScheduling", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// EnableSchedulingPreparer prepares the EnableScheduling request.
+func (client ComputeNodeClient) EnableSchedulingPreparer(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/enablescheduling", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// EnableSchedulingSender sends the EnableScheduling request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) EnableSchedulingSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// EnableSchedulingResponder handles the response to the EnableScheduling request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) EnableSchedulingResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get sends the get request.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node that you want to get information about.
+// selectParameter - an OData $select clause.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) Get(ctx context.Context, poolID string, nodeID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ComputeNode, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, poolID, nodeID, selectParameter, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ComputeNodeClient) GetPreparer(ctx context.Context, poolID string, nodeID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) GetResponder(resp *http.Response) (result ComputeNode, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetRemoteDesktop before you can access a Compute Node by using the RDP file, you must create a user Account on the
+// Compute Node. This API can only be invoked on Pools created with a cloud service configuration. For Pools created
+// with a virtual machine configuration, see the GetRemoteLoginSettings API.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node for which you want to get the Remote Desktop Protocol file.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) GetRemoteDesktop(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ReadCloser, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.GetRemoteDesktop")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetRemoteDesktopPreparer(ctx, poolID, nodeID, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteDesktop", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetRemoteDesktopSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteDesktop", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetRemoteDesktopResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteDesktop", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetRemoteDesktopPreparer prepares the GetRemoteDesktop request.
+func (client ComputeNodeClient) GetRemoteDesktopPreparer(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/rdp", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetRemoteDesktopSender sends the GetRemoteDesktop request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) GetRemoteDesktopSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetRemoteDesktopResponder handles the response to the GetRemoteDesktop request. The method
+// returns the response Body to the caller via the Value field rather than closing it.
+func (client ComputeNodeClient) GetRemoteDesktopResponder(resp *http.Response) (result ReadCloser, err error) {
+ result.Value = &resp.Body
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK))
+ result.Response = autorest.Response{Response: resp}
+ return
+}
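+
+// exampleFetchRDPFile is an illustrative sketch, not generated code: because
+// GetRemoteDesktopResponder returns the response stream in Value rather than closing it, the
+// caller is responsible for reading and closing it. The pool and node IDs are placeholders.
+func exampleFetchRDPFile(ctx context.Context, client ComputeNodeClient) error {
+	rdp, err := client.GetRemoteDesktop(ctx, "examplepool", "examplenode", nil, nil, nil, nil)
+	if err != nil {
+		return err
+	}
+	if rdp.Value != nil {
+		defer (*rdp.Value).Close()
+		// ... read the RDP file contents from *rdp.Value ...
+	}
+	return nil
+}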
+
+// GetRemoteLoginSettings before you can remotely log in to a Compute Node using the remote login settings, you must
+// create a user Account on the Compute Node. This API can be invoked only on Pools created with the virtual machine
+// configuration property. For Pools created with a cloud service configuration, see the GetRemoteDesktop API.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node for which to obtain the remote login settings.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) GetRemoteLoginSettings(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ComputeNodeGetRemoteLoginSettingsResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.GetRemoteLoginSettings")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetRemoteLoginSettingsPreparer(ctx, poolID, nodeID, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteLoginSettings", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetRemoteLoginSettingsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteLoginSettings", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetRemoteLoginSettingsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "GetRemoteLoginSettings", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetRemoteLoginSettingsPreparer prepares the GetRemoteLoginSettings request.
+func (client ComputeNodeClient) GetRemoteLoginSettingsPreparer(ctx context.Context, poolID string, nodeID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/remoteloginsettings", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetRemoteLoginSettingsSender sends the GetRemoteLoginSettings request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) GetRemoteLoginSettingsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetRemoteLoginSettingsResponder handles the response to the GetRemoteLoginSettings request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) GetRemoteLoginSettingsResponder(resp *http.Response) (result ComputeNodeGetRemoteLoginSettingsResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
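+
+// exampleRemoteLogin is an illustrative sketch, not generated code: it calls
+// GetRemoteLoginSettings for placeholder pool and node IDs with every optional
+// parameter left nil, relying on the client's already-configured Authorizer.
+func exampleRemoteLogin(ctx context.Context, client ComputeNodeClient) (ComputeNodeGetRemoteLoginSettingsResult, error) {
+	return client.GetRemoteLoginSettings(ctx, "examplePool", "tvm-123", nil, nil, nil, nil)
+}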
+
+// List sends the list request.
+// Parameters:
+// poolID - the ID of the Pool from which you want to list Compute Nodes.
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool.
+// selectParameter - an OData $select clause.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 Compute Nodes can be
+// returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) List(ctx context.Context, poolID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ComputeNodeListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.List")
+ defer func() {
+ sc := -1
+ if result.cnlr.Response.Response != nil {
+ sc = result.cnlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.ComputeNodeClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, poolID, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.cnlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.cnlr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ComputeNodeClient) ListPreparer(ctx context.Context, poolID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) ListResponder(resp *http.Response) (result ComputeNodeListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client ComputeNodeClient) listNextResults(ctx context.Context, lastResults ComputeNodeListResult) (result ComputeNodeListResult, err error) {
+ req, err := lastResults.computeNodeListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ComputeNodeClient) ListComplete(ctx context.Context, poolID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result ComputeNodeListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, poolID, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
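+
+// exampleCountNodes is an illustrative sketch, not generated code: it walks the
+// iterator returned by ListComplete for a placeholder pool, assuming the usual
+// autorest iterator helpers (NotDone, NextWithContext) generated for
+// ComputeNodeListResultIterator in this package's models file.
+func exampleCountNodes(ctx context.Context, client ComputeNodeClient) (int, error) {
+	it, err := client.ListComplete(ctx, "examplePool", "", "", nil, nil, nil, nil, nil)
+	if err != nil {
+		return 0, err
+	}
+	count := 0
+	for it.NotDone() {
+		count++
+		if err := it.NextWithContext(ctx); err != nil {
+			return count, err
+		}
+	}
+	return count, nil
+}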
+
+// Reboot you can restart a Compute Node only if it is in an idle or running state.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node that you want to restart.
+// nodeRebootParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) Reboot(ctx context.Context, poolID string, nodeID string, nodeRebootParameter *NodeRebootParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.Reboot")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.RebootPreparer(ctx, poolID, nodeID, nodeRebootParameter, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reboot", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RebootSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reboot", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RebootResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reboot", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// RebootPreparer prepares the Reboot request.
+func (client ComputeNodeClient) RebootPreparer(ctx context.Context, poolID string, nodeID string, nodeRebootParameter *NodeRebootParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/reboot", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if nodeRebootParameter != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(nodeRebootParameter))
+ }
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RebootSender sends the Reboot request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) RebootSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// RebootResponder handles the response to the Reboot request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) RebootResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
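+
+// exampleReboot is an illustrative sketch, not generated code: it restarts a
+// placeholder Compute Node, leaving the optional nodeRebootParameter and header
+// parameters nil so the service defaults apply.
+func exampleReboot(ctx context.Context, client ComputeNodeClient) (autorest.Response, error) {
+	return client.Reboot(ctx, "examplePool", "tvm-123", nil, nil, nil, nil, nil)
+}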
+
+// Reimage you can reinstall the operating system on a Compute Node only if it is in an idle or running state. This API
+// can be invoked only on Pools created with the cloud service configuration property.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node that you want to reimage.
+// nodeReimageParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) Reimage(ctx context.Context, poolID string, nodeID string, nodeReimageParameter *NodeReimageParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.Reimage")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ReimagePreparer(ctx, poolID, nodeID, nodeReimageParameter, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reimage", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ReimageSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reimage", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ReimageResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "Reimage", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ReimagePreparer prepares the Reimage request.
+func (client ComputeNodeClient) ReimagePreparer(ctx context.Context, poolID string, nodeID string, nodeReimageParameter *NodeReimageParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/reimage", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if nodeReimageParameter != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(nodeReimageParameter))
+ }
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ReimageSender sends the Reimage request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) ReimageSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ReimageResponder handles the response to the Reimage request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// UpdateUser this operation replaces all of the updatable properties of the Account. For example, if the expiryTime
+// element is not specified, the current value is replaced with the default value, not left unmodified. You can update
+// a user Account on a Compute Node only when it is in the idle or running state.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the machine on which you want to update a user Account.
+// userName - the name of the user Account to update.
+// nodeUpdateUserParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) UpdateUser(ctx context.Context, poolID string, nodeID string, userName string, nodeUpdateUserParameter NodeUpdateUserParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.UpdateUser")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdateUserPreparer(ctx, poolID, nodeID, userName, nodeUpdateUserParameter, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UpdateUser", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateUserSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UpdateUser", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateUserResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UpdateUser", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdateUserPreparer prepares the UpdateUser request.
+func (client ComputeNodeClient) UpdateUserPreparer(ctx context.Context, poolID string, nodeID string, userName string, nodeUpdateUserParameter NodeUpdateUserParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ "userName": autorest.Encode("path", userName),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/users/{userName}", pathParameters),
+ autorest.WithJSON(nodeUpdateUserParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateUserSender sends the UpdateUser request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) UpdateUserSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateUserResponder handles the response to the UpdateUser request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) UpdateUserResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
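+
+// exampleUpdateUser is an illustrative sketch, not generated code: because
+// UpdateUser replaces every updatable property of the user Account, the full
+// NodeUpdateUserParameter is built by the caller and passed straight through;
+// pool, node and user names are placeholders.
+func exampleUpdateUser(ctx context.Context, client ComputeNodeClient, update NodeUpdateUserParameter) (autorest.Response, error) {
+	return client.UpdateUser(ctx, "examplePool", "tvm-123", "exampleUser", update, nil, nil, nil, nil)
+}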
+
+// UploadBatchServiceLogs this is for gathering Azure Batch service log files in an automated fashion from Compute
+// Nodes if you are experiencing an error and wish to escalate to Azure support. The Azure Batch service log files
+// should be shared with Azure support to aid in debugging issues with the Batch service.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node from which you want to upload the Azure Batch service log files.
+// uploadBatchServiceLogsConfiguration - the Azure Batch service log files upload configuration.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client ComputeNodeClient) UploadBatchServiceLogs(ctx context.Context, poolID string, nodeID string, uploadBatchServiceLogsConfiguration UploadBatchServiceLogsConfiguration, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result UploadBatchServiceLogsResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeClient.UploadBatchServiceLogs")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: uploadBatchServiceLogsConfiguration,
+ Constraints: []validation.Constraint{{Target: "uploadBatchServiceLogsConfiguration.ContainerURL", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "uploadBatchServiceLogsConfiguration.StartTime", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ComputeNodeClient", "UploadBatchServiceLogs", err.Error())
+ }
+
+ req, err := client.UploadBatchServiceLogsPreparer(ctx, poolID, nodeID, uploadBatchServiceLogsConfiguration, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UploadBatchServiceLogs", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UploadBatchServiceLogsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UploadBatchServiceLogs", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UploadBatchServiceLogsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ComputeNodeClient", "UploadBatchServiceLogs", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UploadBatchServiceLogsPreparer prepares the UploadBatchServiceLogs request.
+func (client ComputeNodeClient) UploadBatchServiceLogsPreparer(ctx context.Context, poolID string, nodeID string, uploadBatchServiceLogsConfiguration UploadBatchServiceLogsConfiguration, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs", pathParameters),
+ autorest.WithJSON(uploadBatchServiceLogsConfiguration),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UploadBatchServiceLogsSender sends the UploadBatchServiceLogs request. The method will close the
+// http.Response Body if it receives an error.
+func (client ComputeNodeClient) UploadBatchServiceLogsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UploadBatchServiceLogsResponder handles the response to the UploadBatchServiceLogs request. The method always
+// closes the http.Response Body.
+func (client ComputeNodeClient) UploadBatchServiceLogsResponder(resp *http.Response) (result UploadBatchServiceLogsResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
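+
+// exampleUploadLogs is an illustrative sketch, not generated code: the caller
+// supplies the UploadBatchServiceLogsConfiguration, whose ContainerURL and
+// StartTime fields are required by the validation above; pool and node IDs are
+// placeholders.
+func exampleUploadLogs(ctx context.Context, client ComputeNodeClient, cfg UploadBatchServiceLogsConfiguration) (UploadBatchServiceLogsResult, error) {
+	return client.UploadBatchServiceLogs(ctx, "examplePool", "tvm-123", cfg, nil, nil, nil, nil)
+}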
diff --git a/services/batch/2019-08-01.10.0/batch/file.go b/services/batch/2019-08-01.10.0/batch/file.go
new file mode 100644
index 000000000000..f2577632420d
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/file.go
@@ -0,0 +1,1102 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "github.com/satori/go.uuid"
+ "net/http"
+)
+
+// FileClient is a client for issuing REST requests to the Azure Batch service.
+type FileClient struct {
+ BaseClient
+}
+
+// NewFileClient creates an instance of the FileClient client.
+func NewFileClient(batchURL string) FileClient {
+ return FileClient{New(batchURL)}
+}
+
+// DeleteFromComputeNode sends the delete from compute node request.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node from which you want to delete the file.
+// filePath - the path to the file or directory that you want to delete.
+// recursive - whether to delete children of a directory. If the filePath parameter represents a directory
+// instead of a file, you can set recursive to true to delete the directory and all of the files and
+// subdirectories in it. If recursive is false then the directory must be empty or deletion will fail.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client FileClient) DeleteFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.DeleteFromComputeNode")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeleteFromComputeNodePreparer(ctx, poolID, nodeID, filePath, recursive, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromComputeNode", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteFromComputeNodeSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromComputeNode", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteFromComputeNodeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromComputeNode", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeleteFromComputeNodePreparer prepares the DeleteFromComputeNode request.
+func (client FileClient) DeleteFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "filePath": autorest.Encode("path", filePath),
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if recursive != nil {
+ queryParameters["recursive"] = autorest.Encode("query", *recursive)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files/{filePath}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteFromComputeNodeSender sends the DeleteFromComputeNode request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileClient) DeleteFromComputeNodeSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteFromComputeNodeResponder handles the response to the DeleteFromComputeNode request. The method always
+// closes the http.Response Body.
+func (client FileClient) DeleteFromComputeNodeResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
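+
+// exampleDeleteNodeDir is an illustrative sketch, not generated code: it deletes
+// a placeholder directory and everything beneath it by setting recursive to
+// true, leaving the remaining optional parameters nil.
+func exampleDeleteNodeDir(ctx context.Context, client FileClient) (autorest.Response, error) {
+	recursive := true
+	return client.DeleteFromComputeNode(ctx, "examplePool", "tvm-123", "workitems/exampledir", &recursive, nil, nil, nil, nil)
+}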
+
+// DeleteFromTask sends the delete from task request.
+// Parameters:
+// jobID - the ID of the Job that contains the Task.
+// taskID - the ID of the Task whose file you want to delete.
+// filePath - the path to the Task file or directory that you want to delete.
+// recursive - whether to delete children of a directory. If the filePath parameter represents a directory
+// instead of a file, you can set recursive to true to delete the directory and all of the files and
+// subdirectories in it. If recursive is false then the directory must be empty or deletion will fail.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client FileClient) DeleteFromTask(ctx context.Context, jobID string, taskID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.DeleteFromTask")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeleteFromTaskPreparer(ctx, jobID, taskID, filePath, recursive, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromTask", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteFromTaskSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromTask", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteFromTaskResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "DeleteFromTask", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeleteFromTaskPreparer prepares the DeleteFromTask request.
+func (client FileClient) DeleteFromTaskPreparer(ctx context.Context, jobID string, taskID string, filePath string, recursive *bool, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "filePath": autorest.Encode("path", filePath),
+ "jobId": autorest.Encode("path", jobID),
+ "taskId": autorest.Encode("path", taskID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if recursive != nil {
+ queryParameters["recursive"] = autorest.Encode("query", *recursive)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files/{filePath}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteFromTaskSender sends the DeleteFromTask request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileClient) DeleteFromTaskSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteFromTaskResponder handles the response to the DeleteFromTask request. The method always
+// closes the http.Response Body.
+func (client FileClient) DeleteFromTaskResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// GetFromComputeNode returns the content of the specified Compute Node file.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node that contains the file.
+// filePath - the path to the Compute Node file that you want to get the content of.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ocpRange - the byte range to be retrieved. The default is to retrieve the entire file. The format is
+// bytes=startRange-endRange.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client FileClient) GetFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result ReadCloser, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.GetFromComputeNode")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetFromComputeNodePreparer(ctx, poolID, nodeID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ocpRange, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromComputeNode", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetFromComputeNodeSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromComputeNode", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetFromComputeNodeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromComputeNode", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetFromComputeNodePreparer prepares the GetFromComputeNode request.
+func (client FileClient) GetFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "filePath": autorest.Encode("path", filePath),
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files/{filePath}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ocpRange) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-range", autorest.String(ocpRange)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetFromComputeNodeSender sends the GetFromComputeNode request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileClient) GetFromComputeNodeSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetFromComputeNodeResponder handles the response to the GetFromComputeNode request. The method always
+// closes the http.Response Body.
+func (client FileClient) GetFromComputeNodeResponder(resp *http.Response) (result ReadCloser, err error) {
+ result.Value = &resp.Body
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK))
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetFromTask returns the content of the specified Task file.
+// Parameters:
+// jobID - the ID of the Job that contains the Task.
+// taskID - the ID of the Task whose file you want to retrieve.
+// filePath - the path to the Task file that you want to get the content of.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ocpRange - the byte range to be retrieved. The default is to retrieve the entire file. The format is
+// bytes=startRange-endRange.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client FileClient) GetFromTask(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result ReadCloser, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.GetFromTask")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetFromTaskPreparer(ctx, jobID, taskID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ocpRange, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromTask", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetFromTaskSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromTask", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetFromTaskResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetFromTask", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetFromTaskPreparer prepares the GetFromTask request.
+func (client FileClient) GetFromTaskPreparer(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ocpRange string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "filePath": autorest.Encode("path", filePath),
+ "jobId": autorest.Encode("path", jobID),
+ "taskId": autorest.Encode("path", taskID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files/{filePath}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ocpRange) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-range", autorest.String(ocpRange)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetFromTaskSender sends the GetFromTask request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileClient) GetFromTaskSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetFromTaskResponder handles the response to the GetFromTask request. The method always
+// closes the http.Response Body.
+func (client FileClient) GetFromTaskResponder(resp *http.Response) (result ReadCloser, err error) {
+ result.Value = &resp.Body
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK))
+ result.Response = autorest.Response{Response: resp}
+ return
+}
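+
+// exampleGetTaskOutput is an illustrative sketch, not generated code: it fetches
+// only the first kilobyte of a placeholder Task's stdout.txt by passing an
+// ocp-range value, with the conditional headers left nil. The caller must close
+// the returned ReadCloser's Value after reading.
+func exampleGetTaskOutput(ctx context.Context, client FileClient) (ReadCloser, error) {
+	return client.GetFromTask(ctx, "exampleJob", "exampleTask", "stdout.txt", nil, nil, nil, nil, "bytes=0-1023", nil, nil)
+}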
+
+// GetPropertiesFromComputeNode gets the properties of the specified Compute Node file.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node that contains the file.
+// filePath - the path to the Compute Node file that you want to get the properties of.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client FileClient) GetPropertiesFromComputeNode(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.GetPropertiesFromComputeNode")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPropertiesFromComputeNodePreparer(ctx, poolID, nodeID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromComputeNode", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetPropertiesFromComputeNodeSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromComputeNode", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetPropertiesFromComputeNodeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromComputeNode", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPropertiesFromComputeNodePreparer prepares the GetPropertiesFromComputeNode request.
+func (client FileClient) GetPropertiesFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "filePath": autorest.Encode("path", filePath),
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsHead(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files/{filePath}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetPropertiesFromComputeNodeSender sends the GetPropertiesFromComputeNode request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileClient) GetPropertiesFromComputeNodeSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetPropertiesFromComputeNodeResponder handles the response to the GetPropertiesFromComputeNode request. The method always
+// closes the http.Response Body.
+func (client FileClient) GetPropertiesFromComputeNodeResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
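+
+// Example (not generated; a minimal sketch): GetPropertiesFromComputeNode issues a HEAD
+// request, so the file properties come back only as response headers. The header names
+// used below (Content-Length, ocp-creation-time) are assumptions based on the Batch
+// REST API, and fc/ctx are set up as in the GetFromTask sketch above.
+//
+//	res, err := fc.GetPropertiesFromComputeNode(ctx, "pool-1", "node-1",
+//		"startup/stdout.txt", nil, nil, nil, nil, nil, nil)
+//	if err == nil && res.Response != nil {
+//		size := res.Header.Get("Content-Length")
+//		created := res.Header.Get("ocp-creation-time")
+//		fmt.Printf("size=%s created=%s\n", size, created)
+//	}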
+
+// GetPropertiesFromTask gets the properties of the specified Task file.
+// Parameters:
+// jobID - the ID of the Job that contains the Task.
+// taskID - the ID of the Task whose file you want to get the properties of.
+// filePath - the path to the Task file that you want to get the properties of.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client FileClient) GetPropertiesFromTask(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.GetPropertiesFromTask")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPropertiesFromTaskPreparer(ctx, jobID, taskID, filePath, timeout, clientRequestID, returnClientRequestID, ocpDate, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromTask", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetPropertiesFromTaskSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromTask", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetPropertiesFromTaskResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "GetPropertiesFromTask", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPropertiesFromTaskPreparer prepares the GetPropertiesFromTask request.
+func (client FileClient) GetPropertiesFromTaskPreparer(ctx context.Context, jobID string, taskID string, filePath string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "filePath": autorest.Encode("path", filePath),
+ "jobId": autorest.Encode("path", jobID),
+ "taskId": autorest.Encode("path", taskID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsHead(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files/{filePath}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetPropertiesFromTaskSender sends the GetPropertiesFromTask request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileClient) GetPropertiesFromTaskSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetPropertiesFromTaskResponder handles the response to the GetPropertiesFromTask request. The method always
+// closes the http.Response Body.
+func (client FileClient) GetPropertiesFromTaskResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// ListFromComputeNode sends the list from compute node request.
+// Parameters:
+// poolID - the ID of the Pool that contains the Compute Node.
+// nodeID - the ID of the Compute Node whose files you want to list.
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files.
+// recursive - whether to list children of a directory.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 files can be returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client FileClient) ListFromComputeNode(ctx context.Context, poolID string, nodeID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.ListFromComputeNode")
+ defer func() {
+ sc := -1
+ if result.nflr.Response.Response != nil {
+ sc = result.nflr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.FileClient", "ListFromComputeNode", err.Error())
+ }
+
+ result.fn = client.listFromComputeNodeNextResults
+ req, err := client.ListFromComputeNodePreparer(ctx, poolID, nodeID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromComputeNode", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListFromComputeNodeSender(req)
+ if err != nil {
+ result.nflr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromComputeNode", resp, "Failure sending request")
+ return
+ }
+
+ result.nflr, err = client.ListFromComputeNodeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromComputeNode", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListFromComputeNodePreparer prepares the ListFromComputeNode request.
+func (client FileClient) ListFromComputeNodePreparer(ctx context.Context, poolID string, nodeID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "nodeId": autorest.Encode("path", nodeID),
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if recursive != nil {
+ queryParameters["recursive"] = autorest.Encode("query", *recursive)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/nodes/{nodeId}/files", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListFromComputeNodeSender sends the ListFromComputeNode request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileClient) ListFromComputeNodeSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListFromComputeNodeResponder handles the response to the ListFromComputeNode request. The method always
+// closes the http.Response Body.
+func (client FileClient) ListFromComputeNodeResponder(resp *http.Response) (result NodeFileListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listFromComputeNodeNextResults retrieves the next set of results, if any.
+func (client FileClient) listFromComputeNodeNextResults(ctx context.Context, lastResults NodeFileListResult) (result NodeFileListResult, err error) {
+ req, err := lastResults.nodeFileListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.FileClient", "listFromComputeNodeNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListFromComputeNodeSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.FileClient", "listFromComputeNodeNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListFromComputeNodeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "listFromComputeNodeNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListFromComputeNodeComplete enumerates all values, automatically crossing page boundaries as required.
+func (client FileClient) ListFromComputeNodeComplete(ctx context.Context, poolID string, nodeID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.ListFromComputeNode")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListFromComputeNode(ctx, poolID, nodeID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
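+
+// Example (not generated; a minimal sketch): enumerating every file on a Compute Node
+// with the Complete iterator, which crosses page boundaries automatically. It assumes
+// the generated NodeFile model exposes a *string Name field; fc/ctx are set up as in
+// the earlier sketches.
+//
+//	it, err := fc.ListFromComputeNodeComplete(ctx, "pool-1", "node-1",
+//		"", nil, nil, nil, nil, nil, nil)
+//	for err == nil && it.NotDone() {
+//		if f := it.Value(); f.Name != nil {
+//			fmt.Println(*f.Name)
+//		}
+//		err = it.NextWithContext(ctx)
+//	}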
+
+// ListFromTask sends the list from task request.
+// Parameters:
+// jobID - the ID of the Job that contains the Task.
+// taskID - the ID of the Task whose files you want to list.
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files.
+// recursive - whether to list children of the Task directory. This parameter can be used in combination with
+// the filter parameter to list specific types of files.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 files can be returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client FileClient) ListFromTask(ctx context.Context, jobID string, taskID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.ListFromTask")
+ defer func() {
+ sc := -1
+ if result.nflr.Response.Response != nil {
+ sc = result.nflr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.FileClient", "ListFromTask", err.Error())
+ }
+
+ result.fn = client.listFromTaskNextResults
+ req, err := client.ListFromTaskPreparer(ctx, jobID, taskID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromTask", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListFromTaskSender(req)
+ if err != nil {
+ result.nflr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromTask", resp, "Failure sending request")
+ return
+ }
+
+ result.nflr, err = client.ListFromTaskResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "ListFromTask", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListFromTaskPreparer prepares the ListFromTask request.
+func (client FileClient) ListFromTaskPreparer(ctx context.Context, jobID string, taskID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ "taskId": autorest.Encode("path", taskID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if recursive != nil {
+ queryParameters["recursive"] = autorest.Encode("query", *recursive)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/files", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListFromTaskSender sends the ListFromTask request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileClient) ListFromTaskSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListFromTaskResponder handles the response to the ListFromTask request. The method always
+// closes the http.Response Body.
+func (client FileClient) ListFromTaskResponder(resp *http.Response) (result NodeFileListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listFromTaskNextResults retrieves the next set of results, if any.
+func (client FileClient) listFromTaskNextResults(ctx context.Context, lastResults NodeFileListResult) (result NodeFileListResult, err error) {
+ req, err := lastResults.nodeFileListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.FileClient", "listFromTaskNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListFromTaskSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.FileClient", "listFromTaskNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListFromTaskResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.FileClient", "listFromTaskNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListFromTaskComplete enumerates all values, automatically crossing page boundaries as required.
+func (client FileClient) ListFromTaskComplete(ctx context.Context, jobID string, taskID string, filter string, recursive *bool, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result NodeFileListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileClient.ListFromTask")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListFromTask(ctx, jobID, taskID, filter, recursive, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
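+
+// Example (not generated; a minimal sketch): page-wise iteration over Task files using
+// the page type returned by ListFromTask, as an alternative to the Complete iterator.
+// The Values(), NotDone() and NextWithContext() methods follow the usual generated
+// paging surface and are assumed here.
+//
+//	page, err := fc.ListFromTask(ctx, "job-1", "task-1", "", nil, nil, nil, nil, nil, nil)
+//	for err == nil && page.NotDone() {
+//		for _, f := range page.Values() {
+//			if f.Name != nil {
+//				fmt.Println(*f.Name)
+//			}
+//		}
+//		err = page.NextWithContext(ctx)
+//	}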
diff --git a/services/batch/2019-08-01.10.0/batch/job.go b/services/batch/2019-08-01.10.0/batch/job.go
new file mode 100644
index 000000000000..b09588768359
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/job.go
@@ -0,0 +1,1924 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "github.com/satori/go.uuid"
+ "net/http"
+)
+
+// JobClient is the client for issuing REST requests to the Azure Batch service.
+type JobClient struct {
+ BaseClient
+}
+
+// NewJobClient creates an instance of the JobClient client.
+func NewJobClient(batchURL string) JobClient {
+ return JobClient{New(batchURL)}
+}
+
+// Add the Batch service supports two ways to control the work done as part of a Job. In the first approach, the user
+// specifies a Job Manager Task. The Batch service launches this Task when it is ready to start the Job. The Job
+// Manager Task controls all other Tasks that run under this Job, by using the Task APIs. In the second approach, the
+// user directly controls the execution of Tasks under an active Job, by using the Task APIs. Also note: when naming
+// Jobs, avoid including sensitive information such as user names or secret project names. This information may appear
+// in telemetry logs accessible to Microsoft Support engineers.
+// Parameters:
+// job - the Job to be added.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client JobClient) Add(ctx context.Context, job JobAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Add")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: job,
+ Constraints: []validation.Constraint{{Target: "job.ID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.JobManagerTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.JobManagerTask.ID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.JobManagerTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.JobManagerTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.JobManagerTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.JobManagerTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.JobManagerTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.JobManagerTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ {Target: "job.JobPreparationTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.JobPreparationTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.JobPreparationTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.JobPreparationTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.JobPreparationTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.JobPreparationTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.JobPreparationTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ {Target: "job.JobReleaseTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.JobReleaseTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.JobReleaseTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.JobReleaseTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.JobReleaseTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.JobReleaseTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.JobReleaseTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ {Target: "job.PoolInfo", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.VMSize", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration.OsFamily", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "job.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ImageReference", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.NodeAgentSKUID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration.Type", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "job.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration.InboundNATPools", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "job.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ }},
+ }},
+ }},
+ {Target: "job.NetworkConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "job.NetworkConfiguration.SubnetID", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("batch.JobClient", "Add", err.Error())
+ }
+
+ req, err := client.AddPreparer(ctx, job, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Add", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.AddSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Add", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.AddResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Add", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// AddPreparer prepares the Add request.
+func (client JobClient) AddPreparer(ctx context.Context, job JobAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/jobs"),
+ autorest.WithJSON(job),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AddSender sends the Add request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) AddSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// AddResponder handles the response to the Add request. The method always
+// closes the http.Response Body.
+func (client JobClient) AddResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
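+
+// Example (not generated; a minimal sketch): adding a Job. Per the validation above,
+// ID and PoolInfo are required; the job and pool IDs are placeholders, and the
+// PoolInformation field names are assumed from the generated models.
+//
+//	ctx := context.Background()
+//	jc := batch.NewJobClient("https://<account>.<region>.batch.azure.com")
+//	// jc.Authorizer = ...   // however the caller obtains credentials
+//	jobID, poolID := "job-1", "pool-1"
+//	job := batch.JobAddParameter{
+//		ID:       &jobID,
+//		PoolInfo: &batch.PoolInformation{PoolID: &poolID},
+//	}
+//	_, err := jc.Add(ctx, job, nil, nil, nil, nil)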
+
+// Delete deleting a Job also deletes all Tasks that are part of that Job, and all Job statistics. This also overrides
+// the retention period for Task data; that is, if the Job contains Tasks which are still retained on Compute Nodes,
+// the Batch service deletes those Tasks' working directories and all their contents. When a Delete Job request is
+// received, the Batch service sets the Job to the deleting state. All update operations on a Job that is in deleting
+// state will fail with status code 409 (Conflict), with additional information indicating that the Job is being
+// deleted.
+// Parameters:
+// jobID - the ID of the Job to delete.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobClient) Delete(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, jobID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client JobClient) DeletePreparer(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client JobClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
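+
+// Example (not generated; a minimal sketch): deleting a Job only if it has not changed,
+// by passing an ETag (typically taken from a prior Get of the Job) as ifMatch. The ETag
+// value here is a placeholder; jc/ctx are set up as in the Add sketch above.
+//
+//	etag := "<etag-from-get>"
+//	_, err := jc.Delete(ctx, "job-1", nil, nil, nil, nil, etag, "", nil, nil)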
+
+// Disable the Batch service immediately moves the Job to the disabling state. Batch then uses the disableTasks
+// parameter to determine what to do with the currently running Tasks of the Job. The Job remains in the disabling
+// state until the disable operation is completed and all Tasks have been dealt with according to the disableTasks
+// option; the Job then moves to the disabled state. No new Tasks are started under the Job until it moves back to
+// active state. If you try to disable a Job that is in any state other than active, disabling, or disabled, the
+// request fails with status code 409.
+// Parameters:
+// jobID - the ID of the Job to disable.
+// jobDisableParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobClient) Disable(ctx context.Context, jobID string, jobDisableParameter JobDisableParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Disable")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DisablePreparer(ctx, jobID, jobDisableParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Disable", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DisableSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Disable", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DisableResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Disable", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DisablePreparer prepares the Disable request.
+func (client JobClient) DisablePreparer(ctx context.Context, jobID string, jobDisableParameter JobDisableParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/disable", pathParameters),
+ autorest.WithJSON(jobDisableParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DisableSender sends the Disable request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) DisableSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DisableResponder handles the response to the Disable request. The method always
+// closes the http.Response Body.
+func (client JobClient) DisableResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
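+
+// Example (not generated; a minimal sketch): disabling a Job and requeueing its running
+// Tasks. The JobDisableParameter field and the DisableJobOptionRequeue constant name are
+// assumed from the generated models; jc/ctx are set up as in the Add sketch above.
+//
+//	_, err := jc.Disable(ctx, "job-1",
+//		batch.JobDisableParameter{DisableTasks: batch.DisableJobOptionRequeue},
+//		nil, nil, nil, nil, "", "", nil, nil)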
+
+// Enable when you call this API, the Batch service sets a disabled Job to the enabling state. After this operation
+// is completed, the Job moves to the active state, and scheduling of new Tasks under the Job resumes. The Batch
+// service does not allow a Task to remain in the active state for more than 180 days. Therefore, if you enable a Job
+// containing active Tasks which were added more than 180 days ago, those Tasks will not run.
+// Parameters:
+// jobID - the ID of the Job to enable.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobClient) Enable(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Enable")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.EnablePreparer(ctx, jobID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Enable", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.EnableSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Enable", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.EnableResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Enable", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// EnablePreparer prepares the Enable request.
+func (client JobClient) EnablePreparer(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/enable", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// EnableSender sends the Enable request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) EnableSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// EnableResponder handles the response to the Enable request. The method always
+// closes the http.Response Body.
+func (client JobClient) EnableResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
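+// Usage sketch (illustrative only, not part of the generated client): enabling a previously
+// disabled Job with the service defaults. A configured JobClient named "jobClient" and a Job
+// with ID "my-job" are assumed; all optional parameters are left nil/empty, so the 30-second
+// timeout applies and no conditional (ETag or timestamp) headers are sent.
+//
+//    resp, err := jobClient.Enable(context.Background(), "my-job", nil, nil, nil, nil, "", "", nil, nil)
+//    if err != nil {
+//        // handle transport or service failure
+//    }
+//    _ = resp // 200 OK or 202 Accepted means the Job is moving back to the active state
+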
+// Get sends the get request.
+// Parameters:
+// jobID - the ID of the Job.
+// selectParameter - an OData $select clause.
+// expand - an OData $expand clause.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobClient) Get(ctx context.Context, jobID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result CloudJob, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, jobID, selectParameter, expand, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client JobClient) GetPreparer(ctx context.Context, jobID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client JobClient) GetResponder(resp *http.Response) (result CloudJob, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
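+// Usage sketch (illustrative only): fetching a Job while trimming the payload with an OData
+// $select clause. "jobClient" and the Job ID "my-job" are assumptions, and the field list in
+// the $select clause is only an example of the clause shape.
+//
+//    job, err := jobClient.Get(context.Background(), "my-job", "id,state,priority", "", nil, nil, nil, nil, "", "", nil, nil)
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = job // only the properties named in $select are populated on the returned CloudJob
+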
+// GetAllLifetimeStatistics statistics are aggregated across all Jobs that have ever existed in the Account, from
+// Account creation to the last update time of the statistics. The statistics may not be immediately available. The
+// Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.
+// Parameters:
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client JobClient) GetAllLifetimeStatistics(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result JobStatistics, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.GetAllLifetimeStatistics")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAllLifetimeStatisticsPreparer(ctx, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "GetAllLifetimeStatistics", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAllLifetimeStatisticsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "GetAllLifetimeStatistics", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAllLifetimeStatisticsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "GetAllLifetimeStatistics", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAllLifetimeStatisticsPreparer prepares the GetAllLifetimeStatistics request.
+func (client JobClient) GetAllLifetimeStatisticsPreparer(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/lifetimejobstats"),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAllLifetimeStatisticsSender sends the GetAllLifetimeStatistics request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) GetAllLifetimeStatisticsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAllLifetimeStatisticsResponder handles the response to the GetAllLifetimeStatistics request. The method always
+// closes the http.Response Body.
+func (client JobClient) GetAllLifetimeStatisticsResponder(resp *http.Response) (result JobStatistics, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetTaskCounts task counts provide a count of the Tasks by active, running or completed Task state, and a count of
+// Tasks which succeeded or failed. Tasks in the preparing state are counted as running.
+// Parameters:
+// jobID - the ID of the Job.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client JobClient) GetTaskCounts(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result TaskCounts, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.GetTaskCounts")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetTaskCountsPreparer(ctx, jobID, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "GetTaskCounts", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetTaskCountsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "GetTaskCounts", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetTaskCountsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "GetTaskCounts", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetTaskCountsPreparer prepares the GetTaskCounts request.
+func (client JobClient) GetTaskCountsPreparer(ctx context.Context, jobID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/taskcounts", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetTaskCountsSender sends the GetTaskCounts request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) GetTaskCountsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetTaskCountsResponder handles the response to the GetTaskCounts request. The method always
+// closes the http.Response Body.
+func (client JobClient) GetTaskCountsResponder(resp *http.Response) (result TaskCounts, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
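+// Usage sketch (illustrative only): reading the per-state Task counters for a Job. "jobClient"
+// and the Job ID "my-job" are assumptions; the returned TaskCounts groups Tasks by active,
+// running and completed state (Tasks in the preparing state are reported as running).
+//
+//    counts, err := jobClient.GetTaskCounts(context.Background(), "my-job", nil, nil, nil, nil)
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = counts // inspect the counters, e.g. to poll for Job completion without listing Tasks
+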
+// List sends the list request.
+// Parameters:
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs.
+// selectParameter - an OData $select clause.
+// expand - an OData $expand clause.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 Jobs can be returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client JobClient) List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.List")
+ defer func() {
+ sc := -1
+ if result.cjlr.Response.Response != nil {
+ sc = result.cjlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.JobClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.cjlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.cjlr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client JobClient) ListPreparer(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/jobs"),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client JobClient) ListResponder(resp *http.Response) (result CloudJobListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client JobClient) listNextResults(ctx context.Context, lastResults CloudJobListResult) (result CloudJobListResult, err error) {
+ req, err := lastResults.cloudJobListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.JobClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.JobClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client JobClient) ListComplete(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
+
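+// Usage sketch (illustrative only): enumerating Jobs with ListComplete, which follows the
+// paging links transparently via the returned iterator. The OData filter shown is only an
+// example of the clause shape; "jobClient" is an assumed, already configured client, and the
+// iterator methods used below are the standard ones on autorest-generated iterators.
+//
+//    iter, err := jobClient.ListComplete(context.Background(), "state eq 'active'", "", "", nil, nil, nil, nil, nil)
+//    if err != nil {
+//        // handle error
+//    }
+//    for iter.NotDone() {
+//        _ = iter.Value() // one CloudJob per iteration
+//        if err := iter.NextWithContext(context.Background()); err != nil {
+//            break // failed to fetch the next page
+//        }
+//    }
+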
+// ListFromJobSchedule sends the list from job schedule request.
+// Parameters:
+// jobScheduleID - the ID of the Job Schedule from which you want to get a list of Jobs.
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule.
+// selectParameter - an OData $select clause.
+// expand - an OData $expand clause.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 Jobs can be returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client JobClient) ListFromJobSchedule(ctx context.Context, jobScheduleID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.ListFromJobSchedule")
+ defer func() {
+ sc := -1
+ if result.cjlr.Response.Response != nil {
+ sc = result.cjlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.JobClient", "ListFromJobSchedule", err.Error())
+ }
+
+ result.fn = client.listFromJobScheduleNextResults
+ req, err := client.ListFromJobSchedulePreparer(ctx, jobScheduleID, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "ListFromJobSchedule", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListFromJobScheduleSender(req)
+ if err != nil {
+ result.cjlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "ListFromJobSchedule", resp, "Failure sending request")
+ return
+ }
+
+ result.cjlr, err = client.ListFromJobScheduleResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "ListFromJobSchedule", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListFromJobSchedulePreparer prepares the ListFromJobSchedule request.
+func (client JobClient) ListFromJobSchedulePreparer(ctx context.Context, jobScheduleID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobScheduleId": autorest.Encode("path", jobScheduleID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobschedules/{jobScheduleId}/jobs", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListFromJobScheduleSender sends the ListFromJobSchedule request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) ListFromJobScheduleSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListFromJobScheduleResponder handles the response to the ListFromJobSchedule request. The method always
+// closes the http.Response Body.
+func (client JobClient) ListFromJobScheduleResponder(resp *http.Response) (result CloudJobListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listFromJobScheduleNextResults retrieves the next set of results, if any.
+func (client JobClient) listFromJobScheduleNextResults(ctx context.Context, lastResults CloudJobListResult) (result CloudJobListResult, err error) {
+ req, err := lastResults.cloudJobListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.JobClient", "listFromJobScheduleNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListFromJobScheduleSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.JobClient", "listFromJobScheduleNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListFromJobScheduleResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "listFromJobScheduleNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListFromJobScheduleComplete enumerates all values, automatically crossing page boundaries as required.
+func (client JobClient) ListFromJobScheduleComplete(ctx context.Context, jobScheduleID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.ListFromJobSchedule")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListFromJobSchedule(ctx, jobScheduleID, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
+
+// ListPreparationAndReleaseTaskStatus this API returns the Job Preparation and Job Release Task status on all Compute
+// Nodes that have run the Job Preparation or Job Release Task. This includes Compute Nodes which have since been
+// removed from the Pool. If this API is invoked on a Job which has no Job Preparation or Job Release Task, the Batch
+// service returns HTTP status code 409 (Conflict) with an error code of JobPreparationTaskNotSpecified.
+// Parameters:
+// jobID - the ID of the Job.
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status.
+// selectParameter - an OData $select clause.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 Tasks can be returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client JobClient) ListPreparationAndReleaseTaskStatus(ctx context.Context, jobID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListPreparationAndReleaseTaskStatusResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.ListPreparationAndReleaseTaskStatus")
+ defer func() {
+ sc := -1
+ if result.cjlpartsr.Response.Response != nil {
+ sc = result.cjlpartsr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.JobClient", "ListPreparationAndReleaseTaskStatus", err.Error())
+ }
+
+ result.fn = client.listPreparationAndReleaseTaskStatusNextResults
+ req, err := client.ListPreparationAndReleaseTaskStatusPreparer(ctx, jobID, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "ListPreparationAndReleaseTaskStatus", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListPreparationAndReleaseTaskStatusSender(req)
+ if err != nil {
+ result.cjlpartsr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "ListPreparationAndReleaseTaskStatus", resp, "Failure sending request")
+ return
+ }
+
+ result.cjlpartsr, err = client.ListPreparationAndReleaseTaskStatusResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "ListPreparationAndReleaseTaskStatus", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparationAndReleaseTaskStatusPreparer prepares the ListPreparationAndReleaseTaskStatus request.
+func (client JobClient) ListPreparationAndReleaseTaskStatusPreparer(ctx context.Context, jobID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/jobpreparationandreleasetaskstatus", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListPreparationAndReleaseTaskStatusSender sends the ListPreparationAndReleaseTaskStatus request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) ListPreparationAndReleaseTaskStatusSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListPreparationAndReleaseTaskStatusResponder handles the response to the ListPreparationAndReleaseTaskStatus request. The method always
+// closes the http.Response Body.
+func (client JobClient) ListPreparationAndReleaseTaskStatusResponder(resp *http.Response) (result CloudJobListPreparationAndReleaseTaskStatusResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listPreparationAndReleaseTaskStatusNextResults retrieves the next set of results, if any.
+func (client JobClient) listPreparationAndReleaseTaskStatusNextResults(ctx context.Context, lastResults CloudJobListPreparationAndReleaseTaskStatusResult) (result CloudJobListPreparationAndReleaseTaskStatusResult, err error) {
+ req, err := lastResults.cloudJobListPreparationAndReleaseTaskStatusResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.JobClient", "listPreparationAndReleaseTaskStatusNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListPreparationAndReleaseTaskStatusSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.JobClient", "listPreparationAndReleaseTaskStatusNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListPreparationAndReleaseTaskStatusResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "listPreparationAndReleaseTaskStatusNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListPreparationAndReleaseTaskStatusComplete enumerates all values, automatically crossing page boundaries as required.
+func (client JobClient) ListPreparationAndReleaseTaskStatusComplete(ctx context.Context, jobID string, filter string, selectParameter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobListPreparationAndReleaseTaskStatusResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.ListPreparationAndReleaseTaskStatus")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListPreparationAndReleaseTaskStatus(ctx, jobID, filter, selectParameter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
+
+// Patch this replaces only the Job properties specified in the request. For example, if the Job has constraints, and a
+// request does not specify the constraints element, then the Job keeps the existing constraints.
+// Parameters:
+// jobID - the ID of the Job whose properties you want to update.
+// jobPatchParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobClient) Patch(ctx context.Context, jobID string, jobPatchParameter JobPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Patch")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.PatchPreparer(ctx, jobID, jobPatchParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Patch", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.PatchSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Patch", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.PatchResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Patch", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// PatchPreparer prepares the Patch request.
+func (client JobClient) PatchPreparer(ctx context.Context, jobID string, jobPatchParameter JobPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}", pathParameters),
+ autorest.WithJSON(jobPatchParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PatchSender sends the Patch request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) PatchSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// PatchResponder handles the response to the Patch request. The method always
+// closes the http.Response Body.
+func (client JobClient) PatchResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
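+// Usage sketch (illustrative only): a partial update that changes a single property and leaves
+// everything else untouched, matching the Patch semantics described above. "jobClient" and the
+// Job ID "my-job" are assumptions, and Priority is used on the assumption that
+// JobPatchParameter exposes such a field; properties omitted from the body are not modified.
+//
+//    priority := int32(100)
+//    _, err := jobClient.Patch(context.Background(), "my-job", batch.JobPatchParameter{Priority: &priority},
+//        nil, nil, nil, nil, "", "", nil, nil)
+//    if err != nil {
+//        // handle error
+//    }
+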
+// Terminate when a Terminate Job request is received, the Batch service sets the Job to the terminating state. The
+// Batch service then terminates any running Tasks associated with the Job and runs any required Job Release Tasks.
+// Then the Job moves into the completed state. If there are any Tasks in the Job in the active state, they will remain
+// in the active state. Once a Job is terminated, new Tasks cannot be added and any remaining active Tasks will not be
+// scheduled.
+// Parameters:
+// jobID - the ID of the Job to terminate.
+// jobTerminateParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobClient) Terminate(ctx context.Context, jobID string, jobTerminateParameter *JobTerminateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Terminate")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.TerminatePreparer(ctx, jobID, jobTerminateParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Terminate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.TerminateSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Terminate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.TerminateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Terminate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// TerminatePreparer prepares the Terminate request.
+func (client JobClient) TerminatePreparer(ctx context.Context, jobID string, jobTerminateParameter *JobTerminateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/terminate", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if jobTerminateParameter != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(jobTerminateParameter))
+ }
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// TerminateSender sends the Terminate request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) TerminateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// TerminateResponder handles the response to the Terminate request. The method always
+// closes the http.Response Body.
+func (client JobClient) TerminateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
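+// Usage sketch (illustrative only): terminating a Job with an optional reason. "jobClient" and
+// the Job ID "my-job" are assumptions, and TerminateReason is used on the assumption that
+// JobTerminateParameter carries such a field; passing nil for the body is also accepted since
+// the parameter is a pointer.
+//
+//    reason := "all work submitted"
+//    _, err := jobClient.Terminate(context.Background(), "my-job",
+//        &batch.JobTerminateParameter{TerminateReason: &reason},
+//        nil, nil, nil, nil, "", "", nil, nil)
+//    if err != nil {
+//        // handle error
+//    }
+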
+// Update this fully replaces all the updatable properties of the Job. For example, if the Job has constraints
+// associated with it and if constraints is not specified with this request, then the Batch service will remove the
+// existing constraints.
+// Parameters:
+// jobID - the ID of the Job whose properties you want to update.
+// jobUpdateParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobClient) Update(ctx context.Context, jobID string, jobUpdateParameter JobUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: jobUpdateParameter,
+ Constraints: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VMSize", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration.OsFamily", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ImageReference", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.NodeAgentSKUID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration.Type", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration.InboundNATPools", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobUpdateParameter.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ }},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.JobClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, jobID, jobUpdateParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client JobClient) UpdatePreparer(ctx context.Context, jobID string, jobUpdateParameter JobUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}", pathParameters),
+ autorest.WithJSON(jobUpdateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client JobClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
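
Update is a full replacement (a PUT), so any updatable property left out of JobUpdateParameter is reset by the service, and the validation above requires PoolInfo to be present. A hedged sketch follows; it assumes JobUpdateParameter and PoolInformation expose Priority, PoolInfo and PoolID fields as in the package's models (not part of this diff) and uses the go-autorest `to` helpers for the pointer fields.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
	"github.com/Azure/go-autorest/autorest/to"
)

// replaceJob overwrites every updatable property of the Job: anything not
// present in the parameter (constraints, metadata, ...) is reset by the service.
func replaceJob(ctx context.Context, client batch.JobClient, jobID string) error {
	update := batch.JobUpdateParameter{
		Priority: to.Int32Ptr(100),
		// PoolInfo is mandatory for Update, as enforced by the client-side validation.
		PoolInfo: &batch.PoolInformation{PoolID: to.StringPtr("my-pool")},
	}
	_, err := client.Update(ctx, jobID, update, nil, nil, nil, nil, "", "", nil, nil)
	return err
}
```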
diff --git a/services/batch/2019-08-01.10.0/batch/jobschedule.go b/services/batch/2019-08-01.10.0/batch/jobschedule.go
new file mode 100644
index 000000000000..14ac8c019c7d
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/jobschedule.go
@@ -0,0 +1,1521 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "github.com/satori/go.uuid"
+ "net/http"
+)
+
+// JobScheduleClient is a client for issuing REST requests to the Azure Batch service.
+type JobScheduleClient struct {
+ BaseClient
+}
+
+// NewJobScheduleClient creates an instance of the JobScheduleClient client.
+func NewJobScheduleClient(batchURL string) JobScheduleClient {
+ return JobScheduleClient{New(batchURL)}
+}
+
+// Add adds a Job Schedule to the specified Account.
+// Parameters:
+// cloudJobSchedule - the Job Schedule to be added.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client JobScheduleClient) Add(ctx context.Context, cloudJobSchedule JobScheduleAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Add")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: cloudJobSchedule,
+ Constraints: []validation.Constraint{{Target: "cloudJobSchedule.ID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.Schedule", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.NetworkConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.NetworkConfiguration.SubnetID", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "cloudJobSchedule.JobSpecification.JobManagerTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.JobManagerTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.JobManagerTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ {Target: "cloudJobSchedule.JobSpecification.JobPreparationTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.JobPreparationTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ {Target: "cloudJobSchedule.JobSpecification.JobReleaseTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.JobReleaseTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ {Target: "cloudJobSchedule.JobSpecification.PoolInfo", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VMSize", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration.OsFamily", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ImageReference", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.NodeAgentSKUID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration.Type", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration.InboundNATPools", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "cloudJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ }},
+ }},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.JobScheduleClient", "Add", err.Error())
+ }
+
+ req, err := client.AddPreparer(ctx, cloudJobSchedule, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Add", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.AddSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Add", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.AddResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Add", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// AddPreparer prepares the Add request.
+func (client JobScheduleClient) AddPreparer(ctx context.Context, cloudJobSchedule JobScheduleAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/jobschedules"),
+ autorest.WithJSON(cloudJobSchedule),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AddSender sends the Add request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobScheduleClient) AddSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// AddResponder handles the response to the Add request. The method always
+// closes the http.Response Body.
+func (client JobScheduleClient) AddResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
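
The validation block above encodes the minimum payload for Add: ID, Schedule and JobSpecification (with PoolInfo) must all be present. A rough sketch under the assumption that the model types (JobScheduleAddParameter, Schedule, JobSpecification, PoolInformation, defined outside this diff) expose fields with those names:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
	"github.com/Azure/go-autorest/autorest/to"
)

// addSchedule creates a Job Schedule with only the fields the client-side
// validation requires; schedule timing fields are left at their defaults.
func addSchedule(ctx context.Context, client batch.JobScheduleClient, poolID string) error {
	param := batch.JobScheduleAddParameter{
		ID:       to.StringPtr("nightly-reporting"),
		Schedule: &batch.Schedule{}, // recurrence/start-window fields omitted in this sketch
		JobSpecification: &batch.JobSpecification{
			PoolInfo: &batch.PoolInformation{PoolID: to.StringPtr(poolID)},
		},
	}
	_, err := client.Add(ctx, param, nil, nil, nil, nil)
	return err
}
```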
+// Delete: when you delete a Job Schedule, this also deletes all Jobs and Tasks under that schedule. When Tasks are
+// deleted, all the files in their working directories on the Compute Nodes are also deleted (the retention period is
+// ignored). The Job Schedule statistics are no longer accessible once the Job Schedule is deleted, though they are
+// still counted towards Account lifetime statistics.
+// Parameters:
+// jobScheduleID - the ID of the Job Schedule to delete.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobScheduleClient) Delete(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, jobScheduleID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client JobScheduleClient) DeletePreparer(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobScheduleId": autorest.Encode("path", jobScheduleID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobschedules/{jobScheduleId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobScheduleClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client JobScheduleClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
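
Because Delete also removes every Job and Task under the schedule, the ETag parameters documented above are the natural guard against deleting a schedule that someone else has just modified. A small sketch, assuming the ETag string was captured earlier from the service (response-header handling is not shown in this diff):

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
)

// deleteScheduleIfUnchanged deletes the schedule only while its ETag still
// matches, so a concurrent update makes the call fail with a precondition
// error instead of destroying the newer version.
func deleteScheduleIfUnchanged(ctx context.Context, client batch.JobScheduleClient, id, etag string) error {
	_, err := client.Delete(ctx, id, nil, nil, nil, nil, etag, "", nil, nil)
	return err
}
```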
+// Disable: no new Jobs will be created until the Job Schedule is enabled again.
+// Parameters:
+// jobScheduleID - the ID of the Job Schedule to disable.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobScheduleClient) Disable(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Disable")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DisablePreparer(ctx, jobScheduleID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Disable", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DisableSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Disable", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DisableResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Disable", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DisablePreparer prepares the Disable request.
+func (client JobScheduleClient) DisablePreparer(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobScheduleId": autorest.Encode("path", jobScheduleID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobschedules/{jobScheduleId}/disable", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DisableSender sends the Disable request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobScheduleClient) DisableSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DisableResponder handles the response to the Disable request. The method always
+// closes the http.Response Body.
+func (client JobScheduleClient) DisableResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Enable enables a Job Schedule.
+// Parameters:
+// jobScheduleID - the ID of the Job Schedule to enable.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobScheduleClient) Enable(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Enable")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.EnablePreparer(ctx, jobScheduleID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Enable", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.EnableSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Enable", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.EnableResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Enable", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// EnablePreparer prepares the Enable request.
+func (client JobScheduleClient) EnablePreparer(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobScheduleId": autorest.Encode("path", jobScheduleID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobschedules/{jobScheduleId}/enable", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// EnableSender sends the Enable request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobScheduleClient) EnableSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// EnableResponder handles the response to the Enable request. The method always
+// closes the http.Response Body.
+func (client JobScheduleClient) EnableResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
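
Disable and Enable are symmetric POSTs with no request body, so pausing and later resuming job creation for a schedule is simply a pair of calls. A minimal sketch:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
)

// pause stops the schedule from creating new Jobs until resume is called,
// as described in the Disable doc comment above.
func pause(ctx context.Context, client batch.JobScheduleClient, id string) error {
	_, err := client.Disable(ctx, id, nil, nil, nil, nil, "", "", nil, nil)
	return err
}

// resume re-enables job creation for the schedule.
func resume(ctx context.Context, client batch.JobScheduleClient, id string) error {
	_, err := client.Enable(ctx, id, nil, nil, nil, nil, "", "", nil, nil)
	return err
}
```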
+// Exists checks whether the specified Job Schedule exists.
+// Parameters:
+// jobScheduleID - the ID of the Job Schedule which you want to check.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobScheduleClient) Exists(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Exists")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ExistsPreparer(ctx, jobScheduleID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Exists", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ExistsSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Exists", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ExistsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Exists", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ExistsPreparer prepares the Exists request.
+func (client JobScheduleClient) ExistsPreparer(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobScheduleId": autorest.Encode("path", jobScheduleID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsHead(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobschedules/{jobScheduleId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ExistsSender sends the Exists request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobScheduleClient) ExistsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ExistsResponder handles the response to the Exists request. The method always
+// closes the http.Response Body.
+func (client JobScheduleClient) ExistsResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
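
Note that ExistsResponder treats both 200 and 404 as successful responses, so callers distinguish "exists" from "does not exist" by status code rather than by error. A sketch:

```go
package example

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
)

// scheduleExists issues the HEAD request and maps the status code:
// 200 means the Job Schedule exists, 404 means it does not; any other
// outcome surfaces as an error from the responder.
func scheduleExists(ctx context.Context, client batch.JobScheduleClient, id string) (bool, error) {
	resp, err := client.Exists(ctx, id, nil, nil, nil, nil, "", "", nil, nil)
	if err != nil {
		return false, err
	}
	return resp.StatusCode == http.StatusOK, nil
}
```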
+// Get gets information about the specified Job Schedule.
+// Parameters:
+// jobScheduleID - the ID of the Job Schedule to get.
+// selectParameter - an OData $select clause.
+// expand - an OData $expand clause.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobScheduleClient) Get(ctx context.Context, jobScheduleID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result CloudJobSchedule, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, jobScheduleID, selectParameter, expand, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client JobScheduleClient) GetPreparer(ctx context.Context, jobScheduleID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobScheduleId": autorest.Encode("path", jobScheduleID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobschedules/{jobScheduleId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobScheduleClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client JobScheduleClient) GetResponder(resp *http.Response) (result CloudJobSchedule, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
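
Get supports OData $select and $expand projections, so a caller can ask the service for only the fields it needs. A sketch, assuming CloudJobSchedule exposes ID and DisplayName pointer fields (defined in the package's models, outside this diff):

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
)

// describeSchedule fetches only id and displayName via $select; the other
// CloudJobSchedule fields come back unset.
func describeSchedule(ctx context.Context, client batch.JobScheduleClient, id string) error {
	js, err := client.Get(ctx, id, "id,displayName", "", nil, nil, nil, nil, "", "", nil, nil)
	if err != nil {
		return err
	}
	if js.ID != nil && js.DisplayName != nil {
		fmt.Printf("%s: %s\n", *js.ID, *js.DisplayName)
	}
	return nil
}
```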
+// List lists all of the Job Schedules in the specified Account.
+// Parameters:
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules.
+// selectParameter - an OData $select clause.
+// expand - an OData $expand clause.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 Job Schedules can be
+// returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client JobScheduleClient) List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobScheduleListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.List")
+ defer func() {
+ sc := -1
+ if result.cjslr.Response.Response != nil {
+ sc = result.cjslr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.JobScheduleClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.cjslr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.cjslr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client JobScheduleClient) ListPreparer(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/jobschedules"),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobScheduleClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client JobScheduleClient) ListResponder(resp *http.Response) (result CloudJobScheduleListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client JobScheduleClient) listNextResults(ctx context.Context, lastResults CloudJobScheduleListResult) (result CloudJobScheduleListResult, err error) {
+ req, err := lastResults.cloudJobScheduleListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.JobScheduleClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.JobScheduleClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client JobScheduleClient) ListComplete(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudJobScheduleListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
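+
+// Example (illustrative sketch, not generated code): enumerating all Job Schedules with
+// ListComplete. The account endpoint placeholder is an assumption, NewJobScheduleClient is
+// assumed to be the constructor defined elsewhere in this package, and acquiring an
+// authorizer (shared key or AAD) is out of scope here.
+//
+//   ctx := context.Background()
+//   client := batch.NewJobScheduleClient("https://<account>.<region>.batch.azure.com")
+//   client.Authorizer = authorizer // acquired elsewhere
+//   it, err := client.ListComplete(ctx, "", "", "", nil, nil, nil, nil, nil)
+//   for ; err == nil && it.NotDone(); err = it.NextWithContext(ctx) {
+//       fmt.Println(to.String(it.Value().ID))
+//   }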
+
+// Patch replaces only the Job Schedule properties specified in the request. For example, if the schedule property
+// is not specified with this request, then the Batch service will keep the existing schedule. Changes to a Job
+// Schedule only impact Jobs created by the schedule after the update has taken place; currently running Jobs are
+// unaffected.
+// Parameters:
+// jobScheduleID - the ID of the Job Schedule to update.
+// jobSchedulePatchParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobScheduleClient) Patch(ctx context.Context, jobScheduleID string, jobSchedulePatchParameter JobSchedulePatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Patch")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.PatchPreparer(ctx, jobScheduleID, jobSchedulePatchParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Patch", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.PatchSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Patch", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.PatchResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Patch", resp, "Failure responding to request")
+ }
+
+ return
+}
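+
+// Example (illustrative sketch, not generated code): patching only the metadata of an existing
+// Job Schedule, leaving its schedule and Job specification untouched. The schedule ID and
+// metadata values are assumptions; client and ctx are as in the ListComplete sketch above.
+//
+//   patch := batch.JobSchedulePatchParameter{
+//       Metadata: &[]batch.MetadataItem{{Name: to.StringPtr("owner"), Value: to.StringPtr("team-a")}},
+//   }
+//   _, err := client.Patch(ctx, "my-schedule", patch, nil, nil, nil, nil, "", "", nil, nil)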
+
+// PatchPreparer prepares the Patch request.
+func (client JobScheduleClient) PatchPreparer(ctx context.Context, jobScheduleID string, jobSchedulePatchParameter JobSchedulePatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobScheduleId": autorest.Encode("path", jobScheduleID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobschedules/{jobScheduleId}", pathParameters),
+ autorest.WithJSON(jobSchedulePatchParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PatchSender sends the Patch request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobScheduleClient) PatchSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// PatchResponder handles the response to the Patch request. The method always
+// closes the http.Response Body.
+func (client JobScheduleClient) PatchResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Terminate terminates a Job Schedule.
+// Parameters:
+// jobScheduleID - the ID of the Job Schedule to terminate.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobScheduleClient) Terminate(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Terminate")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.TerminatePreparer(ctx, jobScheduleID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Terminate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.TerminateSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Terminate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.TerminateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Terminate", resp, "Failure responding to request")
+ }
+
+ return
+}
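+
+// Example (illustrative sketch, not generated code): terminating a Job Schedule, passing an
+// If-Match ETag so the call only succeeds if the resource is unchanged since it was last read.
+// The schedule ID and ETag placeholder are assumptions; client and ctx are as above.
+//
+//   _, err := client.Terminate(ctx, "my-schedule", nil, nil, nil, nil, "<etag-from-a-previous-get>", "", nil, nil)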
+
+// TerminatePreparer prepares the Terminate request.
+func (client JobScheduleClient) TerminatePreparer(ctx context.Context, jobScheduleID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobScheduleId": autorest.Encode("path", jobScheduleID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobschedules/{jobScheduleId}/terminate", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// TerminateSender sends the Terminate request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobScheduleClient) TerminateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// TerminateResponder handles the response to the Terminate request. The method always
+// closes the http.Response Body.
+func (client JobScheduleClient) TerminateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Update fully replaces all the updatable properties of the Job Schedule. For example, if the schedule property
+// is not specified with this request, then the Batch service will remove the existing schedule. Changes to a Job
+// Schedule only impact Jobs created by the schedule after the update has taken place; currently running Jobs are
+// unaffected.
+// Parameters:
+// jobScheduleID - the ID of the Job Schedule to update.
+// jobScheduleUpdateParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client JobScheduleClient) Update(ctx context.Context, jobScheduleID string, jobScheduleUpdateParameter JobScheduleUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JobScheduleClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: jobScheduleUpdateParameter,
+ Constraints: []validation.Constraint{{Target: "jobScheduleUpdateParameter.Schedule", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.NetworkConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.NetworkConfiguration.SubnetID", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobManagerTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobPreparationTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.JobReleaseTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VMSize", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.CloudServiceConfiguration.OsFamily", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ImageReference", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.NodeAgentSKUID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ContainerConfiguration.Type", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.NetworkConfiguration.EndpointConfiguration.InboundNATPools", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "jobScheduleUpdateParameter.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ }},
+ }},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.JobScheduleClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, jobScheduleID, jobScheduleUpdateParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.JobScheduleClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
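+
+// Example (illustrative sketch, not generated code): fully replacing a Job Schedule. Because
+// Update is a full replacement, the required Schedule and JobSpecification (including PoolInfo)
+// must be supplied. The schedule and pool IDs are assumptions; client and ctx are as above.
+//
+//   upd := batch.JobScheduleUpdateParameter{
+//       Schedule: &batch.Schedule{},
+//       JobSpecification: &batch.JobSpecification{
+//           PoolInfo: &batch.PoolInformation{PoolID: to.StringPtr("my-pool")},
+//       },
+//   }
+//   _, err := client.Update(ctx, "my-schedule", upd, nil, nil, nil, nil, "", "", nil, nil)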
+
+// UpdatePreparer prepares the Update request.
+func (client JobScheduleClient) UpdatePreparer(ctx context.Context, jobScheduleID string, jobScheduleUpdateParameter JobScheduleUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobScheduleId": autorest.Encode("path", jobScheduleID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobschedules/{jobScheduleId}", pathParameters),
+ autorest.WithJSON(jobScheduleUpdateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobScheduleClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client JobScheduleClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
diff --git a/services/batch/2019-08-01.10.0/batch/models.go b/services/batch/2019-08-01.10.0/batch/models.go
new file mode 100644
index 000000000000..d257731256c6
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/models.go
@@ -0,0 +1,4292 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "io"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
+
+// AccessScope enumerates the values for access scope.
+type AccessScope string
+
+const (
+ // Job Grants access to perform all operations on the Job containing the Task.
+ Job AccessScope = "job"
+)
+
+// PossibleAccessScopeValues returns an array of possible values for the AccessScope const type.
+func PossibleAccessScopeValues() []AccessScope {
+ return []AccessScope{Job}
+}
+
+// AllocationState enumerates the values for allocation state.
+type AllocationState string
+
+const (
+ // Resizing The Pool is resizing; that is, Compute Nodes are being added to or removed from the Pool.
+ Resizing AllocationState = "resizing"
+ // Steady The Pool is not resizing. There are no changes to the number of Compute Nodes in the Pool in
+ // progress. A Pool enters this state when it is created and when no operations are being performed on the
+ // Pool to change the number of Compute Nodes.
+ Steady AllocationState = "steady"
+	// Stopping The Pool was resizing, and the user has requested that the resize be stopped, but the stop
+	// request has not yet been completed.
+ Stopping AllocationState = "stopping"
+)
+
+// PossibleAllocationStateValues returns an array of possible values for the AllocationState const type.
+func PossibleAllocationStateValues() []AllocationState {
+ return []AllocationState{Resizing, Steady, Stopping}
+}
+
+// AutoUserScope enumerates the values for auto user scope.
+type AutoUserScope string
+
+const (
+ // Pool Specifies that the Task runs as the common auto user Account which is created on every Compute Node
+ // in a Pool.
+ Pool AutoUserScope = "pool"
+ // Task Specifies that the service should create a new user for the Task.
+ Task AutoUserScope = "task"
+)
+
+// PossibleAutoUserScopeValues returns an array of possible values for the AutoUserScope const type.
+func PossibleAutoUserScopeValues() []AutoUserScope {
+ return []AutoUserScope{Pool, Task}
+}
+
+// CachingType enumerates the values for caching type.
+type CachingType string
+
+const (
+ // None The caching mode for the disk is not enabled.
+ None CachingType = "none"
+ // ReadOnly The caching mode for the disk is read only.
+ ReadOnly CachingType = "readonly"
+ // ReadWrite The caching mode for the disk is read and write.
+ ReadWrite CachingType = "readwrite"
+)
+
+// PossibleCachingTypeValues returns an array of possible values for the CachingType const type.
+func PossibleCachingTypeValues() []CachingType {
+ return []CachingType{None, ReadOnly, ReadWrite}
+}
+
+// CertificateFormat enumerates the values for certificate format.
+type CertificateFormat string
+
+const (
+ // Cer The Certificate is a base64-encoded X.509 Certificate.
+ Cer CertificateFormat = "cer"
+ // Pfx The Certificate is a PFX (PKCS#12) formatted Certificate or Certificate chain.
+ Pfx CertificateFormat = "pfx"
+)
+
+// PossibleCertificateFormatValues returns an array of possible values for the CertificateFormat const type.
+func PossibleCertificateFormatValues() []CertificateFormat {
+ return []CertificateFormat{Cer, Pfx}
+}
+
+// CertificateState enumerates the values for certificate state.
+type CertificateState string
+
+const (
+ // Active The Certificate is available for use in Pools.
+ Active CertificateState = "active"
+ // DeleteFailed The user requested that the Certificate be deleted, but there are Pools that still have
+ // references to the Certificate, or it is still installed on one or more Nodes. (The latter can occur if
+ // the Certificate has been removed from the Pool, but the Compute Node has not yet restarted. Compute
+ // Nodes refresh their Certificates only when they restart.) You may use the cancel Certificate delete
+ // operation to cancel the delete, or the delete Certificate operation to retry the delete.
+ DeleteFailed CertificateState = "deletefailed"
+ // Deleting The user has requested that the Certificate be deleted, but the delete operation has not yet
+ // completed. You may not reference the Certificate when creating or updating Pools.
+ Deleting CertificateState = "deleting"
+)
+
+// PossibleCertificateStateValues returns an array of possible values for the CertificateState const type.
+func PossibleCertificateStateValues() []CertificateState {
+ return []CertificateState{Active, DeleteFailed, Deleting}
+}
+
+// CertificateStoreLocation enumerates the values for certificate store location.
+type CertificateStoreLocation string
+
+const (
+ // CurrentUser Certificates should be installed to the CurrentUser Certificate store.
+ CurrentUser CertificateStoreLocation = "currentuser"
+ // LocalMachine Certificates should be installed to the LocalMachine Certificate store.
+ LocalMachine CertificateStoreLocation = "localmachine"
+)
+
+// PossibleCertificateStoreLocationValues returns an array of possible values for the CertificateStoreLocation const type.
+func PossibleCertificateStoreLocationValues() []CertificateStoreLocation {
+ return []CertificateStoreLocation{CurrentUser, LocalMachine}
+}
+
+// CertificateVisibility enumerates the values for certificate visibility.
+type CertificateVisibility string
+
+const (
+ // CertificateVisibilityRemoteUser The Certificate should be visible to the user accounts under which users
+ // remotely access the Compute Node.
+ CertificateVisibilityRemoteUser CertificateVisibility = "remoteuser"
+ // CertificateVisibilityStartTask The Certificate should be visible to the user account under which the
+ // StartTask is run. Note that if AutoUser Scope is Pool for both the StartTask and a Task, this
+ // certificate will be visible to the Task as well.
+ CertificateVisibilityStartTask CertificateVisibility = "starttask"
+ // CertificateVisibilityTask The Certificate should be visible to the user accounts under which Job Tasks
+ // are run.
+ CertificateVisibilityTask CertificateVisibility = "task"
+)
+
+// PossibleCertificateVisibilityValues returns an array of possible values for the CertificateVisibility const type.
+func PossibleCertificateVisibilityValues() []CertificateVisibility {
+ return []CertificateVisibility{CertificateVisibilityRemoteUser, CertificateVisibilityStartTask, CertificateVisibilityTask}
+}
+
+// ComputeNodeDeallocationOption enumerates the values for compute node deallocation option.
+type ComputeNodeDeallocationOption string
+
+const (
+ // Requeue Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute
+ // Node is available. Remove Compute Nodes as soon as Tasks have been terminated.
+ Requeue ComputeNodeDeallocationOption = "requeue"
+ // RetainedData Allow currently running Tasks to complete, then wait for all Task data retention periods to
+ // expire. Schedule no new Tasks while waiting. Remove Compute Nodes when all Task retention periods have
+ // expired.
+ RetainedData ComputeNodeDeallocationOption = "retaineddata"
+ // TaskCompletion Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Remove
+ // Compute Nodes when all Tasks have completed.
+ TaskCompletion ComputeNodeDeallocationOption = "taskcompletion"
+ // Terminate Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they
+ // were terminated, and will not run again. Remove Compute Nodes as soon as Tasks have been terminated.
+ Terminate ComputeNodeDeallocationOption = "terminate"
+)
+
+// PossibleComputeNodeDeallocationOptionValues returns an array of possible values for the ComputeNodeDeallocationOption const type.
+func PossibleComputeNodeDeallocationOptionValues() []ComputeNodeDeallocationOption {
+ return []ComputeNodeDeallocationOption{Requeue, RetainedData, TaskCompletion, Terminate}
+}
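+
+// Example (illustrative sketch, not generated code): the Possible*Values helpers can be used to
+// validate free-form input before it is sent to the service. The helper below is a hypothetical
+// consumer function, not part of this package.
+//
+//   func isValidDeallocationOption(v string) bool {
+//       for _, opt := range batch.PossibleComputeNodeDeallocationOptionValues() {
+//           if string(opt) == v {
+//               return true
+//           }
+//       }
+//       return false
+//   }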
+
+// ComputeNodeFillType enumerates the values for compute node fill type.
+type ComputeNodeFillType string
+
+const (
+ // Pack As many Tasks as possible (maxTasksPerNode) should be assigned to each Compute Node in the Pool
+ // before any Tasks are assigned to the next Compute Node in the Pool.
+ Pack ComputeNodeFillType = "pack"
+ // Spread Tasks should be assigned evenly across all Compute Nodes in the Pool.
+ Spread ComputeNodeFillType = "spread"
+)
+
+// PossibleComputeNodeFillTypeValues returns an array of possible values for the ComputeNodeFillType const type.
+func PossibleComputeNodeFillTypeValues() []ComputeNodeFillType {
+ return []ComputeNodeFillType{Pack, Spread}
+}
+
+// ComputeNodeRebootOption enumerates the values for compute node reboot option.
+type ComputeNodeRebootOption string
+
+const (
+ // ComputeNodeRebootOptionRequeue Terminate running Task processes and requeue the Tasks. The Tasks will
+ // run again when a Compute Node is available. Restart the Compute Node as soon as Tasks have been
+ // terminated.
+ ComputeNodeRebootOptionRequeue ComputeNodeRebootOption = "requeue"
+ // ComputeNodeRebootOptionRetainedData Allow currently running Tasks to complete, then wait for all Task
+ // data retention periods to expire. Schedule no new Tasks while waiting. Restart the Compute Node when all
+ // Task retention periods have expired.
+ ComputeNodeRebootOptionRetainedData ComputeNodeRebootOption = "retaineddata"
+ // ComputeNodeRebootOptionTaskCompletion Allow currently running Tasks to complete. Schedule no new Tasks
+ // while waiting. Restart the Compute Node when all Tasks have completed.
+ ComputeNodeRebootOptionTaskCompletion ComputeNodeRebootOption = "taskcompletion"
+ // ComputeNodeRebootOptionTerminate Terminate running Tasks. The Tasks will be completed with failureInfo
+ // indicating that they were terminated, and will not run again. Restart the Compute Node as soon as Tasks
+ // have been terminated.
+ ComputeNodeRebootOptionTerminate ComputeNodeRebootOption = "terminate"
+)
+
+// PossibleComputeNodeRebootOptionValues returns an array of possible values for the ComputeNodeRebootOption const type.
+func PossibleComputeNodeRebootOptionValues() []ComputeNodeRebootOption {
+ return []ComputeNodeRebootOption{ComputeNodeRebootOptionRequeue, ComputeNodeRebootOptionRetainedData, ComputeNodeRebootOptionTaskCompletion, ComputeNodeRebootOptionTerminate}
+}
+
+// ComputeNodeReimageOption enumerates the values for compute node reimage option.
+type ComputeNodeReimageOption string
+
+const (
+ // ComputeNodeReimageOptionRequeue Terminate running Task processes and requeue the Tasks. The Tasks will
+ // run again when a Compute Node is available. Reimage the Compute Node as soon as Tasks have been
+ // terminated.
+ ComputeNodeReimageOptionRequeue ComputeNodeReimageOption = "requeue"
+ // ComputeNodeReimageOptionRetainedData Allow currently running Tasks to complete, then wait for all Task
+ // data retention periods to expire. Schedule no new Tasks while waiting. Reimage the Compute Node when all
+ // Task retention periods have expired.
+ ComputeNodeReimageOptionRetainedData ComputeNodeReimageOption = "retaineddata"
+ // ComputeNodeReimageOptionTaskCompletion Allow currently running Tasks to complete. Schedule no new Tasks
+ // while waiting. Reimage the Compute Node when all Tasks have completed.
+ ComputeNodeReimageOptionTaskCompletion ComputeNodeReimageOption = "taskcompletion"
+ // ComputeNodeReimageOptionTerminate Terminate running Tasks. The Tasks will be completed with failureInfo
+ // indicating that they were terminated, and will not run again. Reimage the Compute Node as soon as Tasks
+ // have been terminated.
+ ComputeNodeReimageOptionTerminate ComputeNodeReimageOption = "terminate"
+)
+
+// PossibleComputeNodeReimageOptionValues returns an array of possible values for the ComputeNodeReimageOption const type.
+func PossibleComputeNodeReimageOptionValues() []ComputeNodeReimageOption {
+ return []ComputeNodeReimageOption{ComputeNodeReimageOptionRequeue, ComputeNodeReimageOptionRetainedData, ComputeNodeReimageOptionTaskCompletion, ComputeNodeReimageOptionTerminate}
+}
+
+// ComputeNodeState enumerates the values for compute node state.
+type ComputeNodeState string
+
+const (
+ // Creating The Batch service has obtained the underlying virtual machine from Azure Compute, but it has
+ // not yet started to join the Pool.
+ Creating ComputeNodeState = "creating"
+ // Idle The Compute Node is not currently running a Task.
+ Idle ComputeNodeState = "idle"
+ // LeavingPool The Compute Node is leaving the Pool, either because the user explicitly removed it or
+ // because the Pool is resizing or autoscaling down.
+ LeavingPool ComputeNodeState = "leavingpool"
+ // Offline The Compute Node is not currently running a Task, and scheduling of new Tasks to the Compute
+ // Node is disabled.
+ Offline ComputeNodeState = "offline"
+ // Preempted The low-priority Compute Node has been preempted. Tasks which were running on the Compute Node
+ // when it was preempted will be rescheduled when another Compute Node becomes available.
+ Preempted ComputeNodeState = "preempted"
+ // Rebooting The Compute Node is rebooting.
+ Rebooting ComputeNodeState = "rebooting"
+ // Reimaging The Compute Node is reimaging.
+ Reimaging ComputeNodeState = "reimaging"
+ // Running The Compute Node is running one or more Tasks (other than a StartTask).
+ Running ComputeNodeState = "running"
+ // Starting The Batch service is starting on the underlying virtual machine.
+ Starting ComputeNodeState = "starting"
+ // StartTaskFailed The StartTask has failed on the Compute Node (and exhausted all retries), and
+ // waitForSuccess is set. The Compute Node is not usable for running Tasks.
+ StartTaskFailed ComputeNodeState = "starttaskfailed"
+ // Unknown The Batch service has lost contact with the Compute Node, and does not know its true state.
+ Unknown ComputeNodeState = "unknown"
+ // Unusable The Compute Node cannot be used for Task execution due to errors.
+ Unusable ComputeNodeState = "unusable"
+ // WaitingForStartTask The StartTask has started running on the Compute Node, but waitForSuccess is set and
+ // the StartTask has not yet completed.
+ WaitingForStartTask ComputeNodeState = "waitingforstarttask"
+)
+
+// PossibleComputeNodeStateValues returns an array of possible values for the ComputeNodeState const type.
+func PossibleComputeNodeStateValues() []ComputeNodeState {
+ return []ComputeNodeState{Creating, Idle, LeavingPool, Offline, Preempted, Rebooting, Reimaging, Running, Starting, StartTaskFailed, Unknown, Unusable, WaitingForStartTask}
+}
+
+// ContainerWorkingDirectory enumerates the values for container working directory.
+type ContainerWorkingDirectory string
+
+const (
+ // ContainerImageDefault Use the working directory defined in the container Image. Beware that this
+ // directory will not contain the Resource Files downloaded by Batch.
+ ContainerImageDefault ContainerWorkingDirectory = "containerImageDefault"
+ // TaskWorkingDirectory Use the standard Batch service Task working directory, which will contain the Task
+ // Resource Files populated by Batch.
+ TaskWorkingDirectory ContainerWorkingDirectory = "taskWorkingDirectory"
+)
+
+// PossibleContainerWorkingDirectoryValues returns an array of possible values for the ContainerWorkingDirectory const type.
+func PossibleContainerWorkingDirectoryValues() []ContainerWorkingDirectory {
+ return []ContainerWorkingDirectory{ContainerImageDefault, TaskWorkingDirectory}
+}
+
+// DependencyAction enumerates the values for dependency action.
+type DependencyAction string
+
+const (
+ // Block Blocks tasks waiting on this task, preventing them from being scheduled.
+ Block DependencyAction = "block"
+ // Satisfy Satisfy tasks waiting on this task; once all dependencies are satisfied, the task will be
+ // scheduled to run.
+ Satisfy DependencyAction = "satisfy"
+)
+
+// PossibleDependencyActionValues returns an array of possible values for the DependencyAction const type.
+func PossibleDependencyActionValues() []DependencyAction {
+ return []DependencyAction{Block, Satisfy}
+}
+
+// DisableComputeNodeSchedulingOption enumerates the values for disable compute node scheduling option.
+type DisableComputeNodeSchedulingOption string
+
+const (
+ // DisableComputeNodeSchedulingOptionRequeue Terminate running Task processes and requeue the Tasks. The
+ // Tasks may run again on other Compute Nodes, or when Task scheduling is re-enabled on this Compute Node.
+ // Enter offline state as soon as Tasks have been terminated.
+ DisableComputeNodeSchedulingOptionRequeue DisableComputeNodeSchedulingOption = "requeue"
+ // DisableComputeNodeSchedulingOptionTaskCompletion Allow currently running Tasks to complete. Schedule no
+ // new Tasks while waiting. Enter offline state when all Tasks have completed.
+ DisableComputeNodeSchedulingOptionTaskCompletion DisableComputeNodeSchedulingOption = "taskcompletion"
+ // DisableComputeNodeSchedulingOptionTerminate Terminate running Tasks. The Tasks will be completed with
+ // failureInfo indicating that they were terminated, and will not run again. Enter offline state as soon as
+ // Tasks have been terminated.
+ DisableComputeNodeSchedulingOptionTerminate DisableComputeNodeSchedulingOption = "terminate"
+)
+
+// PossibleDisableComputeNodeSchedulingOptionValues returns an array of possible values for the DisableComputeNodeSchedulingOption const type.
+func PossibleDisableComputeNodeSchedulingOptionValues() []DisableComputeNodeSchedulingOption {
+ return []DisableComputeNodeSchedulingOption{DisableComputeNodeSchedulingOptionRequeue, DisableComputeNodeSchedulingOptionTaskCompletion, DisableComputeNodeSchedulingOptionTerminate}
+}
+
+// DisableJobOption enumerates the values for disable job option.
+type DisableJobOption string
+
+const (
+ // DisableJobOptionRequeue Terminate running Tasks and requeue them. The Tasks will run again when the Job
+ // is enabled.
+ DisableJobOptionRequeue DisableJobOption = "requeue"
+ // DisableJobOptionTerminate Terminate running Tasks. The Tasks will be completed with failureInfo
+ // indicating that they were terminated, and will not run again.
+ DisableJobOptionTerminate DisableJobOption = "terminate"
+ // DisableJobOptionWait Allow currently running Tasks to complete.
+ DisableJobOptionWait DisableJobOption = "wait"
+)
+
+// PossibleDisableJobOptionValues returns an array of possible values for the DisableJobOption const type.
+func PossibleDisableJobOptionValues() []DisableJobOption {
+ return []DisableJobOption{DisableJobOptionRequeue, DisableJobOptionTerminate, DisableJobOptionWait}
+}
+
+// DynamicVNetAssignmentScope enumerates the values for dynamic VNet assignment scope.
+type DynamicVNetAssignmentScope string
+
+const (
+ // DynamicVNetAssignmentScopeJob Dynamic VNet assignment is done per-job.
+ DynamicVNetAssignmentScopeJob DynamicVNetAssignmentScope = "job"
+ // DynamicVNetAssignmentScopeNone No dynamic VNet assignment is enabled.
+ DynamicVNetAssignmentScopeNone DynamicVNetAssignmentScope = "none"
+)
+
+// PossibleDynamicVNetAssignmentScopeValues returns an array of possible values for the DynamicVNetAssignmentScope const type.
+func PossibleDynamicVNetAssignmentScopeValues() []DynamicVNetAssignmentScope {
+ return []DynamicVNetAssignmentScope{DynamicVNetAssignmentScopeJob, DynamicVNetAssignmentScopeNone}
+}
+
+// ElevationLevel enumerates the values for elevation level.
+type ElevationLevel string
+
+const (
+ // Admin The user is a user with elevated access and operates with full Administrator permissions.
+ Admin ElevationLevel = "admin"
+ // NonAdmin The user is a standard user without elevated access.
+ NonAdmin ElevationLevel = "nonadmin"
+)
+
+// PossibleElevationLevelValues returns an array of possible values for the ElevationLevel const type.
+func PossibleElevationLevelValues() []ElevationLevel {
+ return []ElevationLevel{Admin, NonAdmin}
+}
+
+// ErrorCategory enumerates the values for error category.
+type ErrorCategory string
+
+const (
+ // ServerError The error is due to an internal server issue.
+ ServerError ErrorCategory = "servererror"
+ // UserError The error is due to a user issue, such as misconfiguration.
+ UserError ErrorCategory = "usererror"
+)
+
+// PossibleErrorCategoryValues returns an array of possible values for the ErrorCategory const type.
+func PossibleErrorCategoryValues() []ErrorCategory {
+ return []ErrorCategory{ServerError, UserError}
+}
+
+// InboundEndpointProtocol enumerates the values for inbound endpoint protocol.
+type InboundEndpointProtocol string
+
+const (
+ // TCP Use TCP for the endpoint.
+ TCP InboundEndpointProtocol = "tcp"
+ // UDP Use UDP for the endpoint.
+ UDP InboundEndpointProtocol = "udp"
+)
+
+// PossibleInboundEndpointProtocolValues returns an array of possible values for the InboundEndpointProtocol const type.
+func PossibleInboundEndpointProtocolValues() []InboundEndpointProtocol {
+ return []InboundEndpointProtocol{TCP, UDP}
+}
+
+// JobAction enumerates the values for job action.
+type JobAction string
+
+const (
+ // JobActionDisable Disable the Job. This is equivalent to calling the disable Job API, with a disableTasks
+ // value of requeue.
+ JobActionDisable JobAction = "disable"
+ // JobActionNone Take no action.
+ JobActionNone JobAction = "none"
+ // JobActionTerminate Terminate the Job. The terminateReason in the Job's executionInfo is set to
+ // "TaskFailed".
+ JobActionTerminate JobAction = "terminate"
+)
+
+// PossibleJobActionValues returns an array of possible values for the JobAction const type.
+func PossibleJobActionValues() []JobAction {
+ return []JobAction{JobActionDisable, JobActionNone, JobActionTerminate}
+}
+
+// JobPreparationTaskState enumerates the values for job preparation task state.
+type JobPreparationTaskState string
+
+const (
+ // JobPreparationTaskStateCompleted The Task has exited with exit code 0, or the Task has exhausted its
+ // retry limit, or the Batch service was unable to start the Task due to Task preparation errors (such as
+ // resource file download failures).
+ JobPreparationTaskStateCompleted JobPreparationTaskState = "completed"
+ // JobPreparationTaskStateRunning The Task is currently running (including retrying).
+ JobPreparationTaskStateRunning JobPreparationTaskState = "running"
+)
+
+// PossibleJobPreparationTaskStateValues returns an array of possible values for the JobPreparationTaskState const type.
+func PossibleJobPreparationTaskStateValues() []JobPreparationTaskState {
+ return []JobPreparationTaskState{JobPreparationTaskStateCompleted, JobPreparationTaskStateRunning}
+}
+
+// JobReleaseTaskState enumerates the values for job release task state.
+type JobReleaseTaskState string
+
+const (
+ // JobReleaseTaskStateCompleted The Task has exited with exit code 0, or the Task has exhausted its retry
+ // limit, or the Batch service was unable to start the Task due to Task preparation errors (such as
+ // resource file download failures).
+ JobReleaseTaskStateCompleted JobReleaseTaskState = "completed"
+ // JobReleaseTaskStateRunning The Task is currently running (including retrying).
+ JobReleaseTaskStateRunning JobReleaseTaskState = "running"
+)
+
+// PossibleJobReleaseTaskStateValues returns an array of possible values for the JobReleaseTaskState const type.
+func PossibleJobReleaseTaskStateValues() []JobReleaseTaskState {
+ return []JobReleaseTaskState{JobReleaseTaskStateCompleted, JobReleaseTaskStateRunning}
+}
+
+// JobScheduleState enumerates the values for job schedule state.
+type JobScheduleState string
+
+const (
+ // JobScheduleStateActive The Job Schedule is active and will create Jobs as per its schedule.
+ JobScheduleStateActive JobScheduleState = "active"
+ // JobScheduleStateCompleted The Job Schedule has terminated, either by reaching its end time or by the
+ // user terminating it explicitly.
+ JobScheduleStateCompleted JobScheduleState = "completed"
+ // JobScheduleStateDeleting The user has requested that the Job Schedule be deleted, but the delete
+ // operation is still in progress. The scheduler will not initiate any new Jobs for this Job Schedule, and
+ // will delete any existing Jobs and Tasks under the Job Schedule, including any active Job. The Job
+ // Schedule will be deleted when all Jobs and Tasks under the Job Schedule have been deleted.
+ JobScheduleStateDeleting JobScheduleState = "deleting"
+ // JobScheduleStateDisabled The user has disabled the Job Schedule. The scheduler will not initiate any new
+ // Jobs on this schedule, but any existing active Job will continue to run.
+ JobScheduleStateDisabled JobScheduleState = "disabled"
+ // JobScheduleStateTerminating The Job Schedule has no more work to do, or has been explicitly terminated
+ // by the user, but the termination operation is still in progress. The scheduler will not initiate any new
+ // Jobs for this Job Schedule, nor is any existing Job active.
+ JobScheduleStateTerminating JobScheduleState = "terminating"
+)
+
+// PossibleJobScheduleStateValues returns an array of possible values for the JobScheduleState const type.
+func PossibleJobScheduleStateValues() []JobScheduleState {
+ return []JobScheduleState{JobScheduleStateActive, JobScheduleStateCompleted, JobScheduleStateDeleting, JobScheduleStateDisabled, JobScheduleStateTerminating}
+}
+
+// JobState enumerates the values for job state.
+type JobState string
+
+const (
+ // JobStateActive The Job is available to have Tasks scheduled.
+ JobStateActive JobState = "active"
+ // JobStateCompleted All Tasks have terminated, and the system will not accept any more Tasks or any
+ // further changes to the Job.
+ JobStateCompleted JobState = "completed"
+ // JobStateDeleting A user has requested that the Job be deleted, but the delete operation is still in
+ // progress (for example, because the system is still terminating running Tasks).
+ JobStateDeleting JobState = "deleting"
+ // JobStateDisabled A user has disabled the Job. No Tasks are running, and no new Tasks will be scheduled.
+ JobStateDisabled JobState = "disabled"
+ // JobStateDisabling A user has requested that the Job be disabled, but the disable operation is still in
+ // progress (for example, waiting for Tasks to terminate).
+ JobStateDisabling JobState = "disabling"
+ // JobStateEnabling A user has requested that the Job be enabled, but the enable operation is still in
+ // progress.
+ JobStateEnabling JobState = "enabling"
+ // JobStateTerminating The Job is about to complete, either because a Job Manager Task has completed or
+ // because the user has terminated the Job, but the terminate operation is still in progress (for example,
+ // because Job Release Tasks are running).
+ JobStateTerminating JobState = "terminating"
+)
+
+// PossibleJobStateValues returns an array of possible values for the JobState const type.
+func PossibleJobStateValues() []JobState {
+ return []JobState{JobStateActive, JobStateCompleted, JobStateDeleting, JobStateDisabled, JobStateDisabling, JobStateEnabling, JobStateTerminating}
+}
+
+// LoginMode enumerates the values for login mode.
+type LoginMode string
+
+const (
+ // Batch The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running
+ // parallel processes.
+ Batch LoginMode = "batch"
+ // Interactive The LOGON32_LOGON_INTERACTIVE Win32 login mode. UAC is enabled on Windows
+ // VirtualMachineConfiguration Pools. If this option is used with an elevated user identity in a Windows
+ // VirtualMachineConfiguration Pool, the user session will not be elevated unless the application executed
+ // by the Task command line is configured to always require administrative privilege or to always require
+ // maximum privilege.
+ Interactive LoginMode = "interactive"
+)
+
+// PossibleLoginModeValues returns an array of possible values for the LoginMode const type.
+func PossibleLoginModeValues() []LoginMode {
+ return []LoginMode{Batch, Interactive}
+}
+
+// NetworkSecurityGroupRuleAccess enumerates the values for network security group rule access.
+type NetworkSecurityGroupRuleAccess string
+
+const (
+ // Allow Allow access.
+ Allow NetworkSecurityGroupRuleAccess = "allow"
+ // Deny Deny access.
+ Deny NetworkSecurityGroupRuleAccess = "deny"
+)
+
+// PossibleNetworkSecurityGroupRuleAccessValues returns an array of possible values for the NetworkSecurityGroupRuleAccess const type.
+func PossibleNetworkSecurityGroupRuleAccessValues() []NetworkSecurityGroupRuleAccess {
+ return []NetworkSecurityGroupRuleAccess{Allow, Deny}
+}
+
+// OnAllTasksComplete enumerates the values for on all tasks complete.
+type OnAllTasksComplete string
+
+const (
+ // NoAction Do nothing. The Job remains active unless terminated or disabled by some other means.
+ NoAction OnAllTasksComplete = "noaction"
+ // TerminateJob Terminate the Job. The Job's terminateReason is set to 'AllTasksComplete'.
+ TerminateJob OnAllTasksComplete = "terminatejob"
+)
+
+// PossibleOnAllTasksCompleteValues returns an array of possible values for the OnAllTasksComplete const type.
+func PossibleOnAllTasksCompleteValues() []OnAllTasksComplete {
+ return []OnAllTasksComplete{NoAction, TerminateJob}
+}
+
+// OnTaskFailure enumerates the values for on task failure.
+type OnTaskFailure string
+
+const (
+ // OnTaskFailureNoAction Do nothing. The Job remains active unless terminated or disabled by some other
+ // means.
+ OnTaskFailureNoAction OnTaskFailure = "noaction"
+ // OnTaskFailurePerformExitOptionsJobAction Take the action associated with the Task exit condition in the
+ // Task's exitConditions collection. (This may still result in no action being taken, if that is what the
+ // Task specifies.)
+ OnTaskFailurePerformExitOptionsJobAction OnTaskFailure = "performexitoptionsjobaction"
+)
+
+// PossibleOnTaskFailureValues returns an array of possible values for the OnTaskFailure const type.
+func PossibleOnTaskFailureValues() []OnTaskFailure {
+ return []OnTaskFailure{OnTaskFailureNoAction, OnTaskFailurePerformExitOptionsJobAction}
+}
+
+// OSType enumerates the values for os type.
+type OSType string
+
+const (
+ // Linux The Linux operating system.
+ Linux OSType = "linux"
+ // Windows The Windows operating system.
+ Windows OSType = "windows"
+)
+
+// PossibleOSTypeValues returns an array of possible values for the OSType const type.
+func PossibleOSTypeValues() []OSType {
+ return []OSType{Linux, Windows}
+}
+
+// OutputFileUploadCondition enumerates the values for output file upload condition.
+type OutputFileUploadCondition string
+
+const (
+ // OutputFileUploadConditionTaskCompletion Upload the file(s) after the Task process exits, no matter what
+ // the exit code was.
+ OutputFileUploadConditionTaskCompletion OutputFileUploadCondition = "taskcompletion"
+ // OutputFileUploadConditionTaskFailure Upload the file(s) only after the Task process exits with a nonzero
+ // exit code.
+ OutputFileUploadConditionTaskFailure OutputFileUploadCondition = "taskfailure"
+ // OutputFileUploadConditionTaskSuccess Upload the file(s) only after the Task process exits with an exit
+ // code of 0.
+ OutputFileUploadConditionTaskSuccess OutputFileUploadCondition = "tasksuccess"
+)
+
+// PossibleOutputFileUploadConditionValues returns an array of possible values for the OutputFileUploadCondition const type.
+func PossibleOutputFileUploadConditionValues() []OutputFileUploadCondition {
+ return []OutputFileUploadCondition{OutputFileUploadConditionTaskCompletion, OutputFileUploadConditionTaskFailure, OutputFileUploadConditionTaskSuccess}
+}
+
+// PoolLifetimeOption enumerates the values for pool lifetime option.
+type PoolLifetimeOption string
+
+const (
+ // PoolLifetimeOptionJob The Pool exists for the lifetime of the Job to which it is dedicated. The Batch
+ // service creates the Pool when it creates the Job. If the 'job' option is applied to a Job Schedule, the
+ // Batch service creates a new auto Pool for every Job created on the schedule.
+ PoolLifetimeOptionJob PoolLifetimeOption = "job"
+ // PoolLifetimeOptionJobSchedule The Pool exists for the lifetime of the Job Schedule. The Batch service
+ // creates the Pool when it creates the first Job on the schedule. You may apply this option only to Job
+ // Schedules, not to Jobs.
+ PoolLifetimeOptionJobSchedule PoolLifetimeOption = "jobschedule"
+)
+
+// PossiblePoolLifetimeOptionValues returns an array of possible values for the PoolLifetimeOption const type.
+func PossiblePoolLifetimeOptionValues() []PoolLifetimeOption {
+ return []PoolLifetimeOption{PoolLifetimeOptionJob, PoolLifetimeOptionJobSchedule}
+}
+
+// PoolState enumerates the values for pool state.
+type PoolState string
+
+const (
+ // PoolStateActive The Pool is available to run Tasks subject to the availability of Compute Nodes.
+ PoolStateActive PoolState = "active"
+ // PoolStateDeleting The user has requested that the Pool be deleted, but the delete operation has not yet
+ // completed.
+ PoolStateDeleting PoolState = "deleting"
+)
+
+// PossiblePoolStateValues returns an array of possible values for the PoolState const type.
+func PossiblePoolStateValues() []PoolState {
+ return []PoolState{PoolStateActive, PoolStateDeleting}
+}
+
+// SchedulingState enumerates the values for scheduling state.
+type SchedulingState string
+
+const (
+ // Disabled No new Tasks will be scheduled on the Compute Node. Tasks already running on the Compute Node
+ // may still run to completion. All Compute Nodes start with scheduling enabled.
+ Disabled SchedulingState = "disabled"
+ // Enabled Tasks can be scheduled on the Compute Node.
+ Enabled SchedulingState = "enabled"
+)
+
+// PossibleSchedulingStateValues returns an array of possible values for the SchedulingState const type.
+func PossibleSchedulingStateValues() []SchedulingState {
+ return []SchedulingState{Disabled, Enabled}
+}
+
+// StartTaskState enumerates the values for start task state.
+type StartTaskState string
+
+const (
+ // StartTaskStateCompleted The StartTask has exited with exit code 0, or the StartTask has failed and the
+ // retry limit has been reached, or the StartTask process did not run due to Task preparation errors (such as
+ // resource file download failures).
+ StartTaskStateCompleted StartTaskState = "completed"
+ // StartTaskStateRunning The StartTask is currently running.
+ StartTaskStateRunning StartTaskState = "running"
+)
+
+// PossibleStartTaskStateValues returns an array of possible values for the StartTaskState const type.
+func PossibleStartTaskStateValues() []StartTaskState {
+ return []StartTaskState{StartTaskStateCompleted, StartTaskStateRunning}
+}
+
+// StorageAccountType enumerates the values for storage account type.
+type StorageAccountType string
+
+const (
+ // PremiumLRS The data disk should use premium locally redundant storage.
+ PremiumLRS StorageAccountType = "premium_lrs"
+ // StandardLRS The data disk should use standard locally redundant storage.
+ StandardLRS StorageAccountType = "standard_lrs"
+)
+
+// PossibleStorageAccountTypeValues returns an array of possible values for the StorageAccountType const type.
+func PossibleStorageAccountTypeValues() []StorageAccountType {
+ return []StorageAccountType{PremiumLRS, StandardLRS}
+}
+
+// SubtaskState enumerates the values for subtask state.
+type SubtaskState string
+
+const (
+ // SubtaskStateCompleted The Task is no longer eligible to run, usually because the Task has finished
+ // successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also
+ // marked as completed if an error occurred launching the Task, or when the Task has been terminated.
+ SubtaskStateCompleted SubtaskState = "completed"
+ // SubtaskStatePreparing The Task has been assigned to a Compute Node, but is waiting for a required Job
+ // Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will
+ // move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible
+ // to be assigned to a different Compute Node.
+ SubtaskStatePreparing SubtaskState = "preparing"
+ // SubtaskStateRunning The Task is running on a Compute Node. This includes task-level preparation such as
+ // downloading resource files or deploying Packages specified on the Task - it does not necessarily mean
+ // that the Task command line has started executing.
+ SubtaskStateRunning SubtaskState = "running"
+)
+
+// PossibleSubtaskStateValues returns an array of possible values for the SubtaskState const type.
+func PossibleSubtaskStateValues() []SubtaskState {
+ return []SubtaskState{SubtaskStateCompleted, SubtaskStatePreparing, SubtaskStateRunning}
+}
+
+// TaskAddStatus enumerates the values for task add status.
+type TaskAddStatus string
+
+const (
+ // TaskAddStatusClientError The Task failed to add due to a client error and should not be retried without
+ // modifying the request as appropriate.
+ TaskAddStatusClientError TaskAddStatus = "clienterror"
+ // TaskAddStatusServerError The Task failed to add due to a server error and can be retried without
+ // modification.
+ TaskAddStatusServerError TaskAddStatus = "servererror"
+ // TaskAddStatusSuccess The Task was added successfully.
+ TaskAddStatusSuccess TaskAddStatus = "success"
+)
+
+// PossibleTaskAddStatusValues returns an array of possible values for the TaskAddStatus const type.
+func PossibleTaskAddStatusValues() []TaskAddStatus {
+ return []TaskAddStatus{TaskAddStatusClientError, TaskAddStatusServerError, TaskAddStatusSuccess}
+}
+
+// TaskExecutionResult enumerates the values for task execution result.
+type TaskExecutionResult string
+
+const (
+ // Failure There was an error during processing of the Task. The failure may have occurred before the Task
+ // process was launched, while the Task process was executing, or after the Task process exited.
+ Failure TaskExecutionResult = "failure"
+ // Success The Task ran successfully.
+ Success TaskExecutionResult = "success"
+)
+
+// PossibleTaskExecutionResultValues returns an array of possible values for the TaskExecutionResult const type.
+func PossibleTaskExecutionResultValues() []TaskExecutionResult {
+ return []TaskExecutionResult{Failure, Success}
+}
+
+// TaskState enumerates the values for task state.
+type TaskState string
+
+const (
+ // TaskStateActive The Task is queued and able to run, but is not currently assigned to a Compute Node. A
+ // Task enters this state when it is created, when it is enabled after being disabled, or when it is
+ // awaiting a retry after a failed run.
+ TaskStateActive TaskState = "active"
+ // TaskStateCompleted The Task is no longer eligible to run, usually because the Task has finished
+ // successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also
+ // marked as completed if an error occurred launching the Task, or when the Task has been terminated.
+ TaskStateCompleted TaskState = "completed"
+ // TaskStatePreparing The Task has been assigned to a Compute Node, but is waiting for a required Job
+ // Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will
+ // move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible
+ // to be assigned to a different Compute Node.
+ TaskStatePreparing TaskState = "preparing"
+ // TaskStateRunning The Task is running on a Compute Node. This includes task-level preparation such as
+ // downloading resource files or deploying Packages specified on the Task - it does not necessarily mean
+ // that the Task command line has started executing.
+ TaskStateRunning TaskState = "running"
+)
+
+// PossibleTaskStateValues returns an array of possible values for the TaskState const type.
+func PossibleTaskStateValues() []TaskState {
+ return []TaskState{TaskStateActive, TaskStateCompleted, TaskStatePreparing, TaskStateRunning}
+}
+
+// VerificationType enumerates the values for verification type.
+type VerificationType string
+
+const (
+ // Unverified The associated Compute Node agent SKU should have binary compatibility with the Image, but
+ // specific functionality has not been verified.
+ Unverified VerificationType = "unverified"
+ // Verified The Image is guaranteed to be compatible with the associated Compute Node agent SKU and all
+ // Batch features have been confirmed to work as expected.
+ Verified VerificationType = "verified"
+)
+
+// PossibleVerificationTypeValues returns an array of possible values for the VerificationType const type.
+func PossibleVerificationTypeValues() []VerificationType {
+ return []VerificationType{Unverified, Verified}
+}
+
+// AccountListSupportedImagesResult ...
+type AccountListSupportedImagesResult struct {
+ autorest.Response `json:"-"`
+ Value *[]ImageInformation `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// AccountListSupportedImagesResultIterator provides access to a complete listing of ImageInformation
+// values.
+type AccountListSupportedImagesResultIterator struct {
+ i int
+ page AccountListSupportedImagesResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *AccountListSupportedImagesResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountListSupportedImagesResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *AccountListSupportedImagesResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter AccountListSupportedImagesResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter AccountListSupportedImagesResultIterator) Response() AccountListSupportedImagesResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter AccountListSupportedImagesResultIterator) Value() ImageInformation {
+ if !iter.page.NotDone() {
+ return ImageInformation{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewAccountListSupportedImagesResultIterator creates a new instance of the AccountListSupportedImagesResultIterator type.
+func NewAccountListSupportedImagesResultIterator(page AccountListSupportedImagesResultPage) AccountListSupportedImagesResultIterator {
+ return AccountListSupportedImagesResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (alsir AccountListSupportedImagesResult) IsEmpty() bool {
+ return alsir.Value == nil || len(*alsir.Value) == 0
+}
+
+// accountListSupportedImagesResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (alsir AccountListSupportedImagesResult) accountListSupportedImagesResultPreparer(ctx context.Context) (*http.Request, error) {
+ if alsir.OdataNextLink == nil || len(to.String(alsir.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(alsir.OdataNextLink)))
+}
+
+// AccountListSupportedImagesResultPage contains a page of ImageInformation values.
+type AccountListSupportedImagesResultPage struct {
+ fn func(context.Context, AccountListSupportedImagesResult) (AccountListSupportedImagesResult, error)
+ alsir AccountListSupportedImagesResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *AccountListSupportedImagesResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountListSupportedImagesResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.alsir)
+ if err != nil {
+ return err
+ }
+ page.alsir = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AccountListSupportedImagesResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page AccountListSupportedImagesResultPage) NotDone() bool {
+ return !page.alsir.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page AccountListSupportedImagesResultPage) Response() AccountListSupportedImagesResult {
+ return page.alsir
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page AccountListSupportedImagesResultPage) Values() []ImageInformation {
+ if page.alsir.IsEmpty() {
+ return nil
+ }
+ return *page.alsir.Value
+}
+
+// NewAccountListSupportedImagesResultPage creates a new instance of the AccountListSupportedImagesResultPage type.
+func NewAccountListSupportedImagesResultPage(getNextPage func(context.Context, AccountListSupportedImagesResult) (AccountListSupportedImagesResult, error)) AccountListSupportedImagesResultPage {
+ return AccountListSupportedImagesResultPage{fn: getNextPage}
+}
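+
+// Example (illustrative sketch, not part of the generated code): draining an
+// AccountListSupportedImagesResultIterator. How the iterator is obtained from
+// the account client is omitted here; only the iterator methods defined above
+// are used, and ctx is assumed to be a context.Context supplied by the caller:
+//
+//	for iter.NotDone() {
+//		img := iter.Value()
+//		_ = img // placeholder for caller-specific handling of each ImageInformation
+//		if err := iter.NextWithContext(ctx); err != nil {
+//			return err
+//		}
+//	}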
+
+// AffinityInformation ...
+type AffinityInformation struct {
+ // AffinityID - You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere.
+ AffinityID *string `json:"affinityId,omitempty"`
+}
+
+// ApplicationListResult ...
+type ApplicationListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]ApplicationSummary `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// ApplicationListResultIterator provides access to a complete listing of ApplicationSummary values.
+type ApplicationListResultIterator struct {
+ i int
+ page ApplicationListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ApplicationListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ApplicationListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ApplicationListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ApplicationListResultIterator) Response() ApplicationListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ApplicationListResultIterator) Value() ApplicationSummary {
+ if !iter.page.NotDone() {
+ return ApplicationSummary{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewApplicationListResultIterator creates a new instance of the ApplicationListResultIterator type.
+func NewApplicationListResultIterator(page ApplicationListResultPage) ApplicationListResultIterator {
+ return ApplicationListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (alr ApplicationListResult) IsEmpty() bool {
+ return alr.Value == nil || len(*alr.Value) == 0
+}
+
+// applicationListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (alr ApplicationListResult) applicationListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if alr.OdataNextLink == nil || len(to.String(alr.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(alr.OdataNextLink)))
+}
+
+// ApplicationListResultPage contains a page of ApplicationSummary values.
+type ApplicationListResultPage struct {
+ fn func(context.Context, ApplicationListResult) (ApplicationListResult, error)
+ alr ApplicationListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ApplicationListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.alr)
+ if err != nil {
+ return err
+ }
+ page.alr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ApplicationListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ApplicationListResultPage) NotDone() bool {
+ return !page.alr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ApplicationListResultPage) Response() ApplicationListResult {
+ return page.alr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ApplicationListResultPage) Values() []ApplicationSummary {
+ if page.alr.IsEmpty() {
+ return nil
+ }
+ return *page.alr.Value
+}
+
+// NewApplicationListResultPage creates a new instance of the ApplicationListResultPage type.
+func NewApplicationListResultPage(getNextPage func(context.Context, ApplicationListResult) (ApplicationListResult, error)) ApplicationListResultPage {
+ return ApplicationListResultPage{fn: getNextPage}
+}
+
+// ApplicationPackageReference ...
+type ApplicationPackageReference struct {
+ ApplicationID *string `json:"applicationId,omitempty"`
+ // Version - If this is omitted on a Pool, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences and HTTP status code 409. If this is omitted on a Task, and no default version is specified for this application, the Task fails with a pre-processing error.
+ Version *string `json:"version,omitempty"`
+}
+
+// ApplicationSummary ...
+type ApplicationSummary struct {
+ autorest.Response `json:"-"`
+ ID *string `json:"id,omitempty"`
+ DisplayName *string `json:"displayName,omitempty"`
+ Versions *[]string `json:"versions,omitempty"`
+}
+
+// AuthenticationTokenSettings ...
+type AuthenticationTokenSettings struct {
+ // Access - The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task.
+ Access *[]AccessScope `json:"access,omitempty"`
+}
+
+// AutoPoolSpecification ...
+type AutoPoolSpecification struct {
+ // AutoPoolIDPrefix - The Batch service assigns each auto Pool a unique identifier on creation. To distinguish between Pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long.
+ AutoPoolIDPrefix *string `json:"autoPoolIdPrefix,omitempty"`
+ // PoolLifetimeOption - Possible values include: 'PoolLifetimeOptionJobSchedule', 'PoolLifetimeOptionJob'
+ PoolLifetimeOption PoolLifetimeOption `json:"poolLifetimeOption,omitempty"`
+ // KeepAlive - If false, the Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the Pool automatically. It is up to the user to delete auto Pools created with this option.
+ KeepAlive *bool `json:"keepAlive,omitempty"`
+ Pool *PoolSpecification `json:"pool,omitempty"`
+}
+
+// AutoScaleRun ...
+type AutoScaleRun struct {
+ autorest.Response `json:"-"`
+ Timestamp *date.Time `json:"timestamp,omitempty"`
+ // Results - Each variable value is returned in the form $variable=value, and variables are separated by semicolons.
+ Results *string `json:"results,omitempty"`
+ Error *AutoScaleRunError `json:"error,omitempty"`
+}
+
+// AutoScaleRunError ...
+type AutoScaleRunError struct {
+ Code *string `json:"code,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Values *[]NameValuePair `json:"values,omitempty"`
+}
+
+// AutoUserSpecification ...
+type AutoUserSpecification struct {
+ // Scope - The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by StartTasks. Possible values include: 'Task', 'Pool'
+ Scope AutoUserScope `json:"scope,omitempty"`
+ // ElevationLevel - The default value is nonAdmin. Possible values include: 'NonAdmin', 'Admin'
+ ElevationLevel ElevationLevel `json:"elevationLevel,omitempty"`
+}
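+
+// Example (illustrative sketch, not part of the generated code): an auto-user
+// that runs with Task scope and elevated (admin) access, using the AutoUserScope
+// and ElevationLevel constants:
+//
+//	spec := AutoUserSpecification{
+//		Scope:          Task,
+//		ElevationLevel: Admin,
+//	}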
+
+// AzureBlobFileSystemConfiguration ...
+type AzureBlobFileSystemConfiguration struct {
+ AccountName *string `json:"accountName,omitempty"`
+ ContainerName *string `json:"containerName,omitempty"`
+ // AccountKey - This property is mutually exclusive with sasKey and one must be specified.
+ AccountKey *string `json:"accountKey,omitempty"`
+ // SasKey - This property is mutually exclusive with accountKey and one must be specified.
+ SasKey *string `json:"sasKey,omitempty"`
+ // BlobfuseOptions - These are 'net use' options in Windows and 'mount' options in Linux.
+ BlobfuseOptions *string `json:"blobfuseOptions,omitempty"`
+ // RelativeMountPath - All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
+ RelativeMountPath *string `json:"relativeMountPath,omitempty"`
+}
+
+// AzureFileShareConfiguration ...
+type AzureFileShareConfiguration struct {
+ AccountName *string `json:"accountName,omitempty"`
+ // AzureFileURL - This is of the form 'https://{account}.file.core.windows.net/'.
+ AzureFileURL *string `json:"azureFileUrl,omitempty"`
+ AccountKey *string `json:"accountKey,omitempty"`
+ // RelativeMountPath - All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
+ RelativeMountPath *string `json:"relativeMountPath,omitempty"`
+ // MountOptions - These are 'net use' options in Windows and 'mount' options in Linux.
+ MountOptions *string `json:"mountOptions,omitempty"`
+}
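+
+// Example (illustrative sketch, not part of the generated code): describing an
+// Azure Files share mount. The account, key and share values are placeholders,
+// and to.StringPtr comes from github.com/Azure/go-autorest/autorest/to:
+//
+//	share := AzureFileShareConfiguration{
+//		AccountName:       to.StringPtr("<storage-account>"),
+//		AzureFileURL:      to.StringPtr("https://<storage-account>.file.core.windows.net/<share>"),
+//		AccountKey:        to.StringPtr("<storage-account-key>"),
+//		RelativeMountPath: to.StringPtr("batchshare"),
+//	}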
+
+// Certificate a Certificate that can be installed on Compute Nodes and can be used to authenticate
+// operations on the machine.
+type Certificate struct {
+ autorest.Response `json:"-"`
+ Thumbprint *string `json:"thumbprint,omitempty"`
+ ThumbprintAlgorithm *string `json:"thumbprintAlgorithm,omitempty"`
+ URL *string `json:"url,omitempty"`
+ // State - Possible values include: 'Active', 'Deleting', 'DeleteFailed'
+ State CertificateState `json:"state,omitempty"`
+ StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"`
+ // PreviousState - This property is not set if the Certificate is in its initial active state. Possible values include: 'Active', 'Deleting', 'DeleteFailed'
+ PreviousState CertificateState `json:"previousState,omitempty"`
+ // PreviousStateTransitionTime - This property is not set if the Certificate is in its initial Active state.
+ PreviousStateTransitionTime *date.Time `json:"previousStateTransitionTime,omitempty"`
+ PublicData *string `json:"publicData,omitempty"`
+ // DeleteCertificateError - This property is set only if the Certificate is in the DeleteFailed state.
+ DeleteCertificateError *DeleteCertificateError `json:"deleteCertificateError,omitempty"`
+}
+
+// CertificateAddParameter ...
+type CertificateAddParameter struct {
+ Thumbprint *string `json:"thumbprint,omitempty"`
+ ThumbprintAlgorithm *string `json:"thumbprintAlgorithm,omitempty"`
+ Data *string `json:"data,omitempty"`
+ // CertificateFormat - Possible values include: 'Pfx', 'Cer'
+ CertificateFormat CertificateFormat `json:"certificateFormat,omitempty"`
+ // Password - This is required if the Certificate format is pfx. It should be omitted if the Certificate format is cer.
+ Password *string `json:"password,omitempty"`
+}
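+
+// Example (illustrative sketch, not part of the generated code): an add-Certificate
+// request body for a PFX-format Certificate. The thumbprint, base64 data and
+// password values are placeholders:
+//
+//	cert := CertificateAddParameter{
+//		Thumbprint:          to.StringPtr("<sha1-thumbprint>"),
+//		ThumbprintAlgorithm: to.StringPtr("sha1"),
+//		Data:                to.StringPtr("<base64-encoded-pfx>"),
+//		CertificateFormat:   Pfx,
+//		Password:            to.StringPtr("<pfx-password>"),
+//	}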
+
+// CertificateListResult ...
+type CertificateListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]Certificate `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// CertificateListResultIterator provides access to a complete listing of Certificate values.
+type CertificateListResultIterator struct {
+ i int
+ page CertificateListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *CertificateListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *CertificateListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter CertificateListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter CertificateListResultIterator) Response() CertificateListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter CertificateListResultIterator) Value() Certificate {
+ if !iter.page.NotDone() {
+ return Certificate{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewCertificateListResultIterator creates a new instance of the CertificateListResultIterator type.
+func NewCertificateListResultIterator(page CertificateListResultPage) CertificateListResultIterator {
+ return CertificateListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (clr CertificateListResult) IsEmpty() bool {
+ return clr.Value == nil || len(*clr.Value) == 0
+}
+
+// certificateListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (clr CertificateListResult) certificateListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if clr.OdataNextLink == nil || len(to.String(clr.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(clr.OdataNextLink)))
+}
+
+// CertificateListResultPage contains a page of Certificate values.
+type CertificateListResultPage struct {
+ fn func(context.Context, CertificateListResult) (CertificateListResult, error)
+ clr CertificateListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *CertificateListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.clr)
+ if err != nil {
+ return err
+ }
+ page.clr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *CertificateListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page CertificateListResultPage) NotDone() bool {
+ return !page.clr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page CertificateListResultPage) Response() CertificateListResult {
+ return page.clr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page CertificateListResultPage) Values() []Certificate {
+ if page.clr.IsEmpty() {
+ return nil
+ }
+ return *page.clr.Value
+}
+
+// NewCertificateListResultPage creates a new instance of the CertificateListResultPage type.
+func NewCertificateListResultPage(getNextPage func(context.Context, CertificateListResult) (CertificateListResult, error)) CertificateListResultPage {
+ return CertificateListResultPage{fn: getNextPage}
+}
+
+// CertificateReference ...
+type CertificateReference struct {
+ Thumbprint *string `json:"thumbprint,omitempty"`
+ ThumbprintAlgorithm *string `json:"thumbprintAlgorithm,omitempty"`
+ // StoreLocation - The default value is currentuser. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. Possible values include: 'CurrentUser', 'LocalMachine'
+ StoreLocation CertificateStoreLocation `json:"storeLocation,omitempty"`
+ // StoreName - This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My.
+ StoreName *string `json:"storeName,omitempty"`
+ // Visibility - You can specify more than one visibility in this collection. The default is all Accounts.
+ Visibility *[]CertificateVisibility `json:"visibility,omitempty"`
+}
+
+// CIFSMountConfiguration ...
+type CIFSMountConfiguration struct {
+ Username *string `json:"username,omitempty"`
+ Source *string `json:"source,omitempty"`
+ // RelativeMountPath - All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
+ RelativeMountPath *string `json:"relativeMountPath,omitempty"`
+ // MountOptions - These are 'net use' options in Windows and 'mount' options in Linux.
+ MountOptions *string `json:"mountOptions,omitempty"`
+ Password *string `json:"password,omitempty"`
+}
+
+// CloudJob ...
+type CloudJob struct {
+ autorest.Response `json:"-"`
+ // ID - The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case).
+ ID *string `json:"id,omitempty"`
+ DisplayName *string `json:"displayName,omitempty"`
+ UsesTaskDependencies *bool `json:"usesTaskDependencies,omitempty"`
+ URL *string `json:"url,omitempty"`
+ // ETag - This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime.
+ ETag *string `json:"eTag,omitempty"`
+ // LastModified - This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state.
+ LastModified *date.Time `json:"lastModified,omitempty"`
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+ // State - Possible values include: 'JobStateActive', 'JobStateDisabling', 'JobStateDisabled', 'JobStateEnabling', 'JobStateTerminating', 'JobStateCompleted', 'JobStateDeleting'
+ State JobState `json:"state,omitempty"`
+ StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"`
+ // PreviousState - This property is not set if the Job is in its initial Active state. Possible values include: 'JobStateActive', 'JobStateDisabling', 'JobStateDisabled', 'JobStateEnabling', 'JobStateTerminating', 'JobStateCompleted', 'JobStateDeleting'
+ PreviousState JobState `json:"previousState,omitempty"`
+ // PreviousStateTransitionTime - This property is not set if the Job is in its initial Active state.
+ PreviousStateTransitionTime *date.Time `json:"previousStateTransitionTime,omitempty"`
+ // Priority - Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0.
+ Priority *int32 `json:"priority,omitempty"`
+ Constraints *JobConstraints `json:"constraints,omitempty"`
+ JobManagerTask *JobManagerTask `json:"jobManagerTask,omitempty"`
+ // JobPreparationTask - The Job Preparation Task is a special Task run on each Compute Node before any other Task of the Job.
+ JobPreparationTask *JobPreparationTask `json:"jobPreparationTask,omitempty"`
+ // JobReleaseTask - The Job Release Task is a special Task run at the end of the Job on each Compute Node that has run any other Task of the Job.
+ JobReleaseTask *JobReleaseTask `json:"jobReleaseTask,omitempty"`
+ // CommonEnvironmentSettings - Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value.
+ CommonEnvironmentSettings *[]EnvironmentSetting `json:"commonEnvironmentSettings,omitempty"`
+ PoolInfo *PoolInformation `json:"poolInfo,omitempty"`
+ // OnAllTasksComplete - The default is noaction. Possible values include: 'NoAction', 'TerminateJob'
+ OnAllTasksComplete OnAllTasksComplete `json:"onAllTasksComplete,omitempty"`
+ // OnTaskFailure - A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. Possible values include: 'OnTaskFailureNoAction', 'OnTaskFailurePerformExitOptionsJobAction'
+ OnTaskFailure OnTaskFailure `json:"onTaskFailure,omitempty"`
+ NetworkConfiguration *JobNetworkConfiguration `json:"networkConfiguration,omitempty"`
+ // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+ ExecutionInfo *JobExecutionInformation `json:"executionInfo,omitempty"`
+ // Stats - This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.
+ Stats *JobStatistics `json:"stats,omitempty"`
+}
+
+// CloudJobListPreparationAndReleaseTaskStatusResult ...
+type CloudJobListPreparationAndReleaseTaskStatusResult struct {
+ autorest.Response `json:"-"`
+ Value *[]JobPreparationAndReleaseTaskExecutionInformation `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// CloudJobListPreparationAndReleaseTaskStatusResultIterator provides access to a complete listing of
+// JobPreparationAndReleaseTaskExecutionInformation values.
+type CloudJobListPreparationAndReleaseTaskStatusResultIterator struct {
+ i int
+ page CloudJobListPreparationAndReleaseTaskStatusResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *CloudJobListPreparationAndReleaseTaskStatusResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobListPreparationAndReleaseTaskStatusResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *CloudJobListPreparationAndReleaseTaskStatusResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter CloudJobListPreparationAndReleaseTaskStatusResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter CloudJobListPreparationAndReleaseTaskStatusResultIterator) Response() CloudJobListPreparationAndReleaseTaskStatusResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter CloudJobListPreparationAndReleaseTaskStatusResultIterator) Value() JobPreparationAndReleaseTaskExecutionInformation {
+ if !iter.page.NotDone() {
+ return JobPreparationAndReleaseTaskExecutionInformation{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewCloudJobListPreparationAndReleaseTaskStatusResultIterator creates a new instance of the CloudJobListPreparationAndReleaseTaskStatusResultIterator type.
+func NewCloudJobListPreparationAndReleaseTaskStatusResultIterator(page CloudJobListPreparationAndReleaseTaskStatusResultPage) CloudJobListPreparationAndReleaseTaskStatusResultIterator {
+ return CloudJobListPreparationAndReleaseTaskStatusResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (cjlpartsr CloudJobListPreparationAndReleaseTaskStatusResult) IsEmpty() bool {
+ return cjlpartsr.Value == nil || len(*cjlpartsr.Value) == 0
+}
+
+// cloudJobListPreparationAndReleaseTaskStatusResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (cjlpartsr CloudJobListPreparationAndReleaseTaskStatusResult) cloudJobListPreparationAndReleaseTaskStatusResultPreparer(ctx context.Context) (*http.Request, error) {
+ if cjlpartsr.OdataNextLink == nil || len(to.String(cjlpartsr.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(cjlpartsr.OdataNextLink)))
+}
+
+// CloudJobListPreparationAndReleaseTaskStatusResultPage contains a page of
+// JobPreparationAndReleaseTaskExecutionInformation values.
+type CloudJobListPreparationAndReleaseTaskStatusResultPage struct {
+ fn func(context.Context, CloudJobListPreparationAndReleaseTaskStatusResult) (CloudJobListPreparationAndReleaseTaskStatusResult, error)
+ cjlpartsr CloudJobListPreparationAndReleaseTaskStatusResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *CloudJobListPreparationAndReleaseTaskStatusResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobListPreparationAndReleaseTaskStatusResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.cjlpartsr)
+ if err != nil {
+ return err
+ }
+ page.cjlpartsr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *CloudJobListPreparationAndReleaseTaskStatusResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page CloudJobListPreparationAndReleaseTaskStatusResultPage) NotDone() bool {
+ return !page.cjlpartsr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page CloudJobListPreparationAndReleaseTaskStatusResultPage) Response() CloudJobListPreparationAndReleaseTaskStatusResult {
+ return page.cjlpartsr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page CloudJobListPreparationAndReleaseTaskStatusResultPage) Values() []JobPreparationAndReleaseTaskExecutionInformation {
+ if page.cjlpartsr.IsEmpty() {
+ return nil
+ }
+ return *page.cjlpartsr.Value
+}
+
+// NewCloudJobListPreparationAndReleaseTaskStatusResultPage creates a new instance of the CloudJobListPreparationAndReleaseTaskStatusResultPage type.
+func NewCloudJobListPreparationAndReleaseTaskStatusResultPage(getNextPage func(context.Context, CloudJobListPreparationAndReleaseTaskStatusResult) (CloudJobListPreparationAndReleaseTaskStatusResult, error)) CloudJobListPreparationAndReleaseTaskStatusResultPage {
+ return CloudJobListPreparationAndReleaseTaskStatusResultPage{fn: getNextPage}
+}
+
+// CloudJobListResult ...
+type CloudJobListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]CloudJob `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// CloudJobListResultIterator provides access to a complete listing of CloudJob values.
+type CloudJobListResultIterator struct {
+ i int
+ page CloudJobListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *CloudJobListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *CloudJobListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter CloudJobListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter CloudJobListResultIterator) Response() CloudJobListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter CloudJobListResultIterator) Value() CloudJob {
+ if !iter.page.NotDone() {
+ return CloudJob{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewCloudJobListResultIterator creates a new instance of the CloudJobListResultIterator type.
+func NewCloudJobListResultIterator(page CloudJobListResultPage) CloudJobListResultIterator {
+ return CloudJobListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (cjlr CloudJobListResult) IsEmpty() bool {
+ return cjlr.Value == nil || len(*cjlr.Value) == 0
+}
+
+// cloudJobListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (cjlr CloudJobListResult) cloudJobListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if cjlr.OdataNextLink == nil || len(to.String(cjlr.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(cjlr.OdataNextLink)))
+}
+
+// CloudJobListResultPage contains a page of CloudJob values.
+type CloudJobListResultPage struct {
+ fn func(context.Context, CloudJobListResult) (CloudJobListResult, error)
+ cjlr CloudJobListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *CloudJobListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.cjlr)
+ if err != nil {
+ return err
+ }
+ page.cjlr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *CloudJobListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page CloudJobListResultPage) NotDone() bool {
+ return !page.cjlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page CloudJobListResultPage) Response() CloudJobListResult {
+ return page.cjlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page CloudJobListResultPage) Values() []CloudJob {
+ if page.cjlr.IsEmpty() {
+ return nil
+ }
+ return *page.cjlr.Value
+}
+
+// NewCloudJobListResultPage creates a new instance of the CloudJobListResultPage type.
+func NewCloudJobListResultPage(getNextPage func(context.Context, CloudJobListResult) (CloudJobListResult, error)) CloudJobListResultPage {
+ return CloudJobListResultPage{fn: getNextPage}
+}
+
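+// The snippet below is an illustrative sketch, not part of the generated API:
+// it shows the usual NotDone/Value/NextWithContext pattern for walking every
+// CloudJob across all pages. The iterator would normally come from a list
+// operation on the Job client (not shown here), and the helper name is made up.
+func exampleCollectCloudJobIDs(ctx context.Context, iter CloudJobListResultIterator) ([]string, error) {
+	var ids []string
+	for iter.NotDone() {
+		// Value returns a zero CloudJob once the iterator is exhausted, so the
+		// nil check on ID keeps the loop safe.
+		if job := iter.Value(); job.ID != nil {
+			ids = append(ids, *job.ID)
+		}
+		// NextWithContext fetches the next page when the current one is drained.
+		if err := iter.NextWithContext(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return ids, nil
+}
+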
+// CloudJobSchedule ...
+type CloudJobSchedule struct {
+ autorest.Response `json:"-"`
+ ID *string `json:"id,omitempty"`
+ DisplayName *string `json:"displayName,omitempty"`
+ URL *string `json:"url,omitempty"`
+	// ETag - This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime.
+ ETag *string `json:"eTag,omitempty"`
+ // LastModified - This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state.
+ LastModified *date.Time `json:"lastModified,omitempty"`
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+ // State - Possible values include: 'JobScheduleStateActive', 'JobScheduleStateCompleted', 'JobScheduleStateDisabled', 'JobScheduleStateTerminating', 'JobScheduleStateDeleting'
+ State JobScheduleState `json:"state,omitempty"`
+ StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"`
+ // PreviousState - This property is not present if the Job Schedule is in its initial active state. Possible values include: 'JobScheduleStateActive', 'JobScheduleStateCompleted', 'JobScheduleStateDisabled', 'JobScheduleStateTerminating', 'JobScheduleStateDeleting'
+ PreviousState JobScheduleState `json:"previousState,omitempty"`
+ // PreviousStateTransitionTime - This property is not present if the Job Schedule is in its initial active state.
+ PreviousStateTransitionTime *date.Time `json:"previousStateTransitionTime,omitempty"`
+ Schedule *Schedule `json:"schedule,omitempty"`
+ JobSpecification *JobSpecification `json:"jobSpecification,omitempty"`
+ ExecutionInfo *JobScheduleExecutionInformation `json:"executionInfo,omitempty"`
+ // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+ Stats *JobScheduleStatistics `json:"stats,omitempty"`
+}
+
+// CloudJobScheduleListResult ...
+type CloudJobScheduleListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]CloudJobSchedule `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// CloudJobScheduleListResultIterator provides access to a complete listing of CloudJobSchedule values.
+type CloudJobScheduleListResultIterator struct {
+ i int
+ page CloudJobScheduleListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *CloudJobScheduleListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobScheduleListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *CloudJobScheduleListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter CloudJobScheduleListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter CloudJobScheduleListResultIterator) Response() CloudJobScheduleListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter CloudJobScheduleListResultIterator) Value() CloudJobSchedule {
+ if !iter.page.NotDone() {
+ return CloudJobSchedule{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewCloudJobScheduleListResultIterator creates a new instance of the CloudJobScheduleListResultIterator type.
+func NewCloudJobScheduleListResultIterator(page CloudJobScheduleListResultPage) CloudJobScheduleListResultIterator {
+ return CloudJobScheduleListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (cjslr CloudJobScheduleListResult) IsEmpty() bool {
+ return cjslr.Value == nil || len(*cjslr.Value) == 0
+}
+
+// cloudJobScheduleListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (cjslr CloudJobScheduleListResult) cloudJobScheduleListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if cjslr.OdataNextLink == nil || len(to.String(cjslr.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(cjslr.OdataNextLink)))
+}
+
+// CloudJobScheduleListResultPage contains a page of CloudJobSchedule values.
+type CloudJobScheduleListResultPage struct {
+ fn func(context.Context, CloudJobScheduleListResult) (CloudJobScheduleListResult, error)
+ cjslr CloudJobScheduleListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *CloudJobScheduleListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CloudJobScheduleListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.cjslr)
+ if err != nil {
+ return err
+ }
+ page.cjslr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *CloudJobScheduleListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page CloudJobScheduleListResultPage) NotDone() bool {
+ return !page.cjslr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page CloudJobScheduleListResultPage) Response() CloudJobScheduleListResult {
+ return page.cjslr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page CloudJobScheduleListResultPage) Values() []CloudJobSchedule {
+ if page.cjslr.IsEmpty() {
+ return nil
+ }
+ return *page.cjslr.Value
+}
+
+// NewCloudJobScheduleListResultPage creates a new instance of the CloudJobScheduleListResultPage type.
+func NewCloudJobScheduleListResultPage(getNextPage func(context.Context, CloudJobScheduleListResult) (CloudJobScheduleListResult, error)) CloudJobScheduleListResultPage {
+ return CloudJobScheduleListResultPage{fn: getNextPage}
+}
+
+// CloudPool ...
+type CloudPool struct {
+ autorest.Response `json:"-"`
+ // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case).
+ ID *string `json:"id,omitempty"`
+ // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ DisplayName *string `json:"displayName,omitempty"`
+ URL *string `json:"url,omitempty"`
+	// ETag - This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime.
+ ETag *string `json:"eTag,omitempty"`
+ // LastModified - This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state.
+ LastModified *date.Time `json:"lastModified,omitempty"`
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+ // State - Possible values include: 'PoolStateActive', 'PoolStateDeleting'
+ State PoolState `json:"state,omitempty"`
+ StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"`
+ // AllocationState - Possible values include: 'Steady', 'Resizing', 'Stopping'
+ AllocationState AllocationState `json:"allocationState,omitempty"`
+ AllocationStateTransitionTime *date.Time `json:"allocationStateTransitionTime,omitempty"`
+ // VMSize - For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
+ VMSize *string `json:"vmSize,omitempty"`
+ // CloudServiceConfiguration - This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'.
+ CloudServiceConfiguration *CloudServiceConfiguration `json:"cloudServiceConfiguration,omitempty"`
+ // VirtualMachineConfiguration - This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified.
+ VirtualMachineConfiguration *VirtualMachineConfiguration `json:"virtualMachineConfiguration,omitempty"`
+ // ResizeTimeout - This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes.
+ ResizeTimeout *string `json:"resizeTimeout,omitempty"`
+ // ResizeErrors - This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady.
+ ResizeErrors *[]ResizeError `json:"resizeErrors,omitempty"`
+ CurrentDedicatedNodes *int32 `json:"currentDedicatedNodes,omitempty"`
+ // CurrentLowPriorityNodes - Low-priority Compute Nodes which have been preempted are included in this count.
+ CurrentLowPriorityNodes *int32 `json:"currentLowPriorityNodes,omitempty"`
+ TargetDedicatedNodes *int32 `json:"targetDedicatedNodes,omitempty"`
+ TargetLowPriorityNodes *int32 `json:"targetLowPriorityNodes,omitempty"`
+	// EnableAutoScale - If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false.
+ EnableAutoScale *bool `json:"enableAutoScale,omitempty"`
+ // AutoScaleFormula - This property is set only if the Pool automatically scales, i.e. enableAutoScale is true.
+ AutoScaleFormula *string `json:"autoScaleFormula,omitempty"`
+ // AutoScaleEvaluationInterval - This property is set only if the Pool automatically scales, i.e. enableAutoScale is true.
+ AutoScaleEvaluationInterval *string `json:"autoScaleEvaluationInterval,omitempty"`
+ // AutoScaleRun - This property is set only if the Pool automatically scales, i.e. enableAutoScale is true.
+ AutoScaleRun *AutoScaleRun `json:"autoScaleRun,omitempty"`
+	// EnableInterNodeCommunication - This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes being allocated in the Pool.
+ EnableInterNodeCommunication *bool `json:"enableInterNodeCommunication,omitempty"`
+ NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"`
+ StartTask *StartTask `json:"startTask,omitempty"`
+ // CertificateReferences - For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory.
+ CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"`
+ // ApplicationPackageReferences - Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"`
+ // ApplicationLicenses - The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail.
+ ApplicationLicenses *[]string `json:"applicationLicenses,omitempty"`
+ // MaxTasksPerNode - The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the Pool or 256.
+ MaxTasksPerNode *int32 `json:"maxTasksPerNode,omitempty"`
+ // TaskSchedulingPolicy - If not specified, the default is spread.
+ TaskSchedulingPolicy *TaskSchedulingPolicy `json:"taskSchedulingPolicy,omitempty"`
+ UserAccounts *[]UserAccount `json:"userAccounts,omitempty"`
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+ // Stats - This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.
+ Stats *PoolStatistics `json:"stats,omitempty"`
+ // MountConfiguration - This supports Azure Files, NFS, CIFS/SMB, and Blobfuse.
+ MountConfiguration *[]MountConfiguration `json:"mountConfiguration,omitempty"`
+}
+
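+// Illustrative sketch only: every field on CloudPool is a pointer, so callers
+// need nil checks before dereferencing. This hypothetical helper reports
+// whether a retrieved Pool has autoscaling enabled and, if so, returns the
+// formula, reflecting the EnableAutoScale/AutoScaleFormula comments above.
+func examplePoolAutoScaleFormula(p CloudPool) (bool, string) {
+	if p.EnableAutoScale == nil || !*p.EnableAutoScale {
+		return false, ""
+	}
+	formula := ""
+	if p.AutoScaleFormula != nil {
+		formula = *p.AutoScaleFormula
+	}
+	return true, formula
+}
+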
+// CloudPoolListResult ...
+type CloudPoolListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]CloudPool `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// CloudPoolListResultIterator provides access to a complete listing of CloudPool values.
+type CloudPoolListResultIterator struct {
+ i int
+ page CloudPoolListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *CloudPoolListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CloudPoolListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *CloudPoolListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter CloudPoolListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter CloudPoolListResultIterator) Response() CloudPoolListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter CloudPoolListResultIterator) Value() CloudPool {
+ if !iter.page.NotDone() {
+ return CloudPool{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewCloudPoolListResultIterator creates a new instance of the CloudPoolListResultIterator type.
+func NewCloudPoolListResultIterator(page CloudPoolListResultPage) CloudPoolListResultIterator {
+ return CloudPoolListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (cplr CloudPoolListResult) IsEmpty() bool {
+ return cplr.Value == nil || len(*cplr.Value) == 0
+}
+
+// cloudPoolListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (cplr CloudPoolListResult) cloudPoolListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if cplr.OdataNextLink == nil || len(to.String(cplr.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(cplr.OdataNextLink)))
+}
+
+// CloudPoolListResultPage contains a page of CloudPool values.
+type CloudPoolListResultPage struct {
+ fn func(context.Context, CloudPoolListResult) (CloudPoolListResult, error)
+ cplr CloudPoolListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *CloudPoolListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CloudPoolListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.cplr)
+ if err != nil {
+ return err
+ }
+ page.cplr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *CloudPoolListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page CloudPoolListResultPage) NotDone() bool {
+ return !page.cplr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page CloudPoolListResultPage) Response() CloudPoolListResult {
+ return page.cplr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page CloudPoolListResultPage) Values() []CloudPool {
+ if page.cplr.IsEmpty() {
+ return nil
+ }
+ return *page.cplr.Value
+}
+
+// NewCloudPoolListResultPage creates a new instance of the CloudPoolListResultPage type.
+func NewCloudPoolListResultPage(getNextPage func(context.Context, CloudPoolListResult) (CloudPoolListResult, error)) CloudPoolListResultPage {
+ return CloudPoolListResultPage{fn: getNextPage}
+}
+
+// CloudServiceConfiguration ...
+type CloudServiceConfiguration struct {
+ // OsFamily - Possible values are:
+ // 2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1.
+ // 3 - OS Family 3, equivalent to Windows Server 2012.
+ // 4 - OS Family 4, equivalent to Windows Server 2012 R2.
+ // 5 - OS Family 5, equivalent to Windows Server 2016.
+ // 6 - OS Family 6, equivalent to Windows Server 2019. For more information, see Azure Guest OS Releases (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases).
+ OsFamily *string `json:"osFamily,omitempty"`
+ // OsVersion - The default value is * which specifies the latest operating system version for the specified OS family.
+ OsVersion *string `json:"osVersion,omitempty"`
+}
+
+// CloudTask batch will retry Tasks when a recovery operation is triggered on a Node. Examples of recovery
+// operations include (but are not limited to) when an unhealthy Node is rebooted or a Compute Node
+// disappears due to host failure. Retries due to recovery operations are independent of and are not
+// counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to a
+// recovery operation may occur. Because of this, all Tasks should be idempotent. This means Tasks need to
+// tolerate being interrupted and restarted without causing any corruption or duplicate data. The best
+// practice for long running Tasks is to use some form of checkpointing.
+type CloudTask struct {
+ autorest.Response `json:"-"`
+ // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters.
+ ID *string `json:"id,omitempty"`
+ // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ DisplayName *string `json:"displayName,omitempty"`
+ URL *string `json:"url,omitempty"`
+	// ETag - This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime.
+ ETag *string `json:"eTag,omitempty"`
+ LastModified *date.Time `json:"lastModified,omitempty"`
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+ // ExitConditions - How the Batch service should respond when the Task completes.
+ ExitConditions *ExitConditions `json:"exitConditions,omitempty"`
+ // State - Possible values include: 'TaskStateActive', 'TaskStatePreparing', 'TaskStateRunning', 'TaskStateCompleted'
+ State TaskState `json:"state,omitempty"`
+ StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"`
+ // PreviousState - This property is not set if the Task is in its initial Active state. Possible values include: 'TaskStateActive', 'TaskStatePreparing', 'TaskStateRunning', 'TaskStateCompleted'
+ PreviousState TaskState `json:"previousState,omitempty"`
+ // PreviousStateTransitionTime - This property is not set if the Task is in its initial Active state.
+ PreviousStateTransitionTime *date.Time `json:"previousStateTransitionTime,omitempty"`
+ // CommandLine - For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
+ CommandLine *string `json:"commandLine,omitempty"`
+ // ContainerSettings - If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.
+ ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"`
+ // ResourceFiles - For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.
+ ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"`
+ // OutputFiles - For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed.
+ OutputFiles *[]OutputFile `json:"outputFiles,omitempty"`
+ EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"`
+ AffinityInfo *AffinityInformation `json:"affinityInfo,omitempty"`
+ Constraints *TaskConstraints `json:"constraints,omitempty"`
+ // UserIdentity - If omitted, the Task runs as a non-administrative user unique to the Task.
+ UserIdentity *UserIdentity `json:"userIdentity,omitempty"`
+ ExecutionInfo *TaskExecutionInformation `json:"executionInfo,omitempty"`
+ NodeInfo *ComputeNodeInformation `json:"nodeInfo,omitempty"`
+ MultiInstanceSettings *MultiInstanceSettings `json:"multiInstanceSettings,omitempty"`
+ Stats *TaskStatistics `json:"stats,omitempty"`
+ // DependsOn - This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled.
+ DependsOn *TaskDependencies `json:"dependsOn,omitempty"`
+ // ApplicationPackageReferences - Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"`
+ // AuthenticationTokenSettings - If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job.
+ AuthenticationTokenSettings *AuthenticationTokenSettings `json:"authenticationTokenSettings,omitempty"`
+}
+
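+// Illustrative sketch only: the CommandLine comment above notes that command
+// lines do not run under a shell, so shell features require an explicit
+// wrapper such as "/bin/sh -c" on Linux or "cmd /c" on Windows. A CloudTask
+// literal is used here purely to show the field; in practice the command line
+// is supplied when the Task is added to a Job. The ID and script are made up.
+var exampleShellWrappedTask = CloudTask{
+	ID:          to.StringPtr("task-with-shell"),
+	CommandLine: to.StringPtr(`/bin/sh -c "echo started; ./run.sh"`),
+}
+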
+// CloudTaskListResult ...
+type CloudTaskListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]CloudTask `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// CloudTaskListResultIterator provides access to a complete listing of CloudTask values.
+type CloudTaskListResultIterator struct {
+ i int
+ page CloudTaskListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *CloudTaskListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CloudTaskListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *CloudTaskListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter CloudTaskListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter CloudTaskListResultIterator) Response() CloudTaskListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter CloudTaskListResultIterator) Value() CloudTask {
+ if !iter.page.NotDone() {
+ return CloudTask{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewCloudTaskListResultIterator creates a new instance of the CloudTaskListResultIterator type.
+func NewCloudTaskListResultIterator(page CloudTaskListResultPage) CloudTaskListResultIterator {
+ return CloudTaskListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (ctlr CloudTaskListResult) IsEmpty() bool {
+ return ctlr.Value == nil || len(*ctlr.Value) == 0
+}
+
+// cloudTaskListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (ctlr CloudTaskListResult) cloudTaskListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if ctlr.OdataNextLink == nil || len(to.String(ctlr.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(ctlr.OdataNextLink)))
+}
+
+// CloudTaskListResultPage contains a page of CloudTask values.
+type CloudTaskListResultPage struct {
+ fn func(context.Context, CloudTaskListResult) (CloudTaskListResult, error)
+ ctlr CloudTaskListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *CloudTaskListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CloudTaskListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.ctlr)
+ if err != nil {
+ return err
+ }
+ page.ctlr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *CloudTaskListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page CloudTaskListResultPage) NotDone() bool {
+ return !page.ctlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page CloudTaskListResultPage) Response() CloudTaskListResult {
+ return page.ctlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page CloudTaskListResultPage) Values() []CloudTask {
+ if page.ctlr.IsEmpty() {
+ return nil
+ }
+ return *page.ctlr.Value
+}
+
+// NewCloudTaskListResultPage creates a new instance of the CloudTaskListResultPage type.
+func NewCloudTaskListResultPage(getNextPage func(context.Context, CloudTaskListResult) (CloudTaskListResult, error)) CloudTaskListResultPage {
+ return CloudTaskListResultPage{fn: getNextPage}
+}
+
+// CloudTaskListSubtasksResult ...
+type CloudTaskListSubtasksResult struct {
+ autorest.Response `json:"-"`
+ Value *[]SubtaskInformation `json:"value,omitempty"`
+}
+
+// ComputeNode ...
+type ComputeNode struct {
+ autorest.Response `json:"-"`
+ // ID - Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes.
+ ID *string `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+	// State - The current state of the Compute Node. A low-priority Compute Node in the 'preempted' state has been preempted; Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. Possible values include: 'Idle', 'Rebooting', 'Reimaging', 'Running', 'Unusable', 'Creating', 'Starting', 'WaitingForStartTask', 'StartTaskFailed', 'Unknown', 'LeavingPool', 'Offline', 'Preempted'
+ State ComputeNodeState `json:"state,omitempty"`
+ // SchedulingState - Possible values include: 'Enabled', 'Disabled'
+ SchedulingState SchedulingState `json:"schedulingState,omitempty"`
+ StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"`
+ // LastBootTime - This property may not be present if the Compute Node state is unusable.
+ LastBootTime *date.Time `json:"lastBootTime,omitempty"`
+ // AllocationTime - This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted.
+ AllocationTime *date.Time `json:"allocationTime,omitempty"`
+ // IPAddress - Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes.
+ IPAddress *string `json:"ipAddress,omitempty"`
+ // AffinityID - Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere.
+ AffinityID *string `json:"affinityId,omitempty"`
+ // VMSize - For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
+ VMSize *string `json:"vmSize,omitempty"`
+ TotalTasksRun *int32 `json:"totalTasksRun,omitempty"`
+ RunningTasksCount *int32 `json:"runningTasksCount,omitempty"`
+ TotalTasksSucceeded *int32 `json:"totalTasksSucceeded,omitempty"`
+ // RecentTasks - This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool.
+ RecentTasks *[]TaskInformation `json:"recentTasks,omitempty"`
+ StartTask *StartTask `json:"startTask,omitempty"`
+ StartTaskInfo *StartTaskInformation `json:"startTaskInfo,omitempty"`
+ // CertificateReferences - For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory.
+ CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"`
+ Errors *[]ComputeNodeError `json:"errors,omitempty"`
+ IsDedicated *bool `json:"isDedicated,omitempty"`
+ EndpointConfiguration *ComputeNodeEndpointConfiguration `json:"endpointConfiguration,omitempty"`
+ NodeAgentInfo *NodeAgentInformation `json:"nodeAgentInfo,omitempty"`
+}
+
+// ComputeNodeEndpointConfiguration ...
+type ComputeNodeEndpointConfiguration struct {
+ InboundEndpoints *[]InboundEndpoint `json:"inboundEndpoints,omitempty"`
+}
+
+// ComputeNodeError ...
+type ComputeNodeError struct {
+ Code *string `json:"code,omitempty"`
+ Message *string `json:"message,omitempty"`
+ ErrorDetails *[]NameValuePair `json:"errorDetails,omitempty"`
+}
+
+// ComputeNodeGetRemoteLoginSettingsResult ...
+type ComputeNodeGetRemoteLoginSettingsResult struct {
+ autorest.Response `json:"-"`
+ RemoteLoginIPAddress *string `json:"remoteLoginIPAddress,omitempty"`
+ RemoteLoginPort *int32 `json:"remoteLoginPort,omitempty"`
+}
+
+// ComputeNodeInformation ...
+type ComputeNodeInformation struct {
+ AffinityID *string `json:"affinityId,omitempty"`
+ NodeURL *string `json:"nodeUrl,omitempty"`
+ PoolID *string `json:"poolId,omitempty"`
+ NodeID *string `json:"nodeId,omitempty"`
+ TaskRootDirectory *string `json:"taskRootDirectory,omitempty"`
+ TaskRootDirectoryURL *string `json:"taskRootDirectoryUrl,omitempty"`
+}
+
+// ComputeNodeListResult ...
+type ComputeNodeListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]ComputeNode `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// ComputeNodeListResultIterator provides access to a complete listing of ComputeNode values.
+type ComputeNodeListResultIterator struct {
+ i int
+ page ComputeNodeListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ComputeNodeListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ComputeNodeListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ComputeNodeListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ComputeNodeListResultIterator) Response() ComputeNodeListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ComputeNodeListResultIterator) Value() ComputeNode {
+ if !iter.page.NotDone() {
+ return ComputeNode{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewComputeNodeListResultIterator creates a new instance of the ComputeNodeListResultIterator type.
+func NewComputeNodeListResultIterator(page ComputeNodeListResultPage) ComputeNodeListResultIterator {
+ return ComputeNodeListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (cnlr ComputeNodeListResult) IsEmpty() bool {
+ return cnlr.Value == nil || len(*cnlr.Value) == 0
+}
+
+// computeNodeListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (cnlr ComputeNodeListResult) computeNodeListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if cnlr.OdataNextLink == nil || len(to.String(cnlr.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(cnlr.OdataNextLink)))
+}
+
+// ComputeNodeListResultPage contains a page of ComputeNode values.
+type ComputeNodeListResultPage struct {
+ fn func(context.Context, ComputeNodeListResult) (ComputeNodeListResult, error)
+ cnlr ComputeNodeListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ComputeNodeListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ComputeNodeListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.cnlr)
+ if err != nil {
+ return err
+ }
+ page.cnlr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ComputeNodeListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ComputeNodeListResultPage) NotDone() bool {
+ return !page.cnlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ComputeNodeListResultPage) Response() ComputeNodeListResult {
+ return page.cnlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ComputeNodeListResultPage) Values() []ComputeNode {
+ if page.cnlr.IsEmpty() {
+ return nil
+ }
+ return *page.cnlr.Value
+}
+
+// NewComputeNodeListResultPage creates a new instance of the ComputeNodeListResultPage type.
+func NewComputeNodeListResultPage(getNextPage func(context.Context, ComputeNodeListResult) (ComputeNodeListResult, error)) ComputeNodeListResultPage {
+ return ComputeNodeListResultPage{fn: getNextPage}
+}
+
+// ComputeNodeUser ...
+type ComputeNodeUser struct {
+ Name *string `json:"name,omitempty"`
+ // IsAdmin - The default value is false.
+ IsAdmin *bool `json:"isAdmin,omitempty"`
+ // ExpiryTime - If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day.
+ ExpiryTime *date.Time `json:"expiryTime,omitempty"`
+ // Password - The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property.
+ Password *string `json:"password,omitempty"`
+ // SSHPublicKey - The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ SSHPublicKey *string `json:"sshPublicKey,omitempty"`
+}
+
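+// Illustrative sketch only: per the field comments above, a Linux Compute Node
+// user is typically given an SSH public key, while a Windows Compute Node user
+// requires a password. Names, key material, and the password are placeholders.
+var (
+	exampleLinuxNodeUser = ComputeNodeUser{
+		Name:         to.StringPtr("batch-admin"),
+		IsAdmin:      to.BoolPtr(true),
+		SSHPublicKey: to.StringPtr("ssh-rsa AAAAB3... placeholder-key"),
+	}
+	exampleWindowsNodeUser = ComputeNodeUser{
+		Name:     to.StringPtr("batch-admin"),
+		Password: to.StringPtr("placeholder-password"),
+	}
+)
+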
+// ContainerConfiguration ...
+type ContainerConfiguration struct {
+ Type *string `json:"type,omitempty"`
+ // ContainerImageNames - This is the full Image reference, as would be specified to "docker pull". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry.
+ ContainerImageNames *[]string `json:"containerImageNames,omitempty"`
+ // ContainerRegistries - If any Images must be downloaded from a private registry which requires credentials, then those credentials must be provided here.
+ ContainerRegistries *[]ContainerRegistry `json:"containerRegistries,omitempty"`
+}
+
+// ContainerRegistry ...
+type ContainerRegistry struct {
+ // RegistryServer - If omitted, the default is "docker.io".
+ RegistryServer *string `json:"registryServer,omitempty"`
+ UserName *string `json:"username,omitempty"`
+ Password *string `json:"password,omitempty"`
+}
+
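+// Illustrative sketch only: unqualified Image names are pulled from the default
+// Docker registry, while fully qualified names need matching credentials in
+// ContainerRegistries. The registry host, image names, and credentials are
+// placeholders, and the "dockerCompatible" type value is an assumption that is
+// not stated in the comments above.
+var exampleContainerConfiguration = ContainerConfiguration{
+	Type: to.StringPtr("dockerCompatible"),
+	ContainerImageNames: &[]string{
+		"ubuntu",                                // sourced from the default Docker registry
+		"myregistry.azurecr.io/team/worker:1.2", // fully qualified private Image
+	},
+	ContainerRegistries: &[]ContainerRegistry{{
+		RegistryServer: to.StringPtr("myregistry.azurecr.io"),
+		UserName:       to.StringPtr("myregistry"),
+		Password:       to.StringPtr("placeholder-password"),
+	}},
+}
+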
+// DataDisk ...
+type DataDisk struct {
+ // Lun - The lun is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct lun.
+ Lun *int32 `json:"lun,omitempty"`
+ // Caching - The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. Possible values include: 'None', 'ReadOnly', 'ReadWrite'
+ Caching CachingType `json:"caching,omitempty"`
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+ // StorageAccountType - If omitted, the default is "standard_lrs". Possible values include: 'StandardLRS', 'PremiumLRS'
+ StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
+}
+
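+// Illustrative sketch only: each data disk needs a distinct lun; per the field
+// comments above, caching defaults to readwrite and storageAccountType to
+// standard_lrs when omitted. Sizes and luns are placeholders.
+var exampleDataDisks = []DataDisk{
+	{Lun: to.Int32Ptr(0), DiskSizeGB: to.Int32Ptr(128)}, // defaults apply
+	{Lun: to.Int32Ptr(1), DiskSizeGB: to.Int32Ptr(512), Caching: ReadOnly, StorageAccountType: PremiumLRS},
+}
+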
+// DeleteCertificateError ...
+type DeleteCertificateError struct {
+ Code *string `json:"code,omitempty"`
+ Message *string `json:"message,omitempty"`
+ // Values - This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred.
+ Values *[]NameValuePair `json:"values,omitempty"`
+}
+
+// EnvironmentSetting ...
+type EnvironmentSetting struct {
+ Name *string `json:"name,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// Error ...
+type Error struct {
+ Code *string `json:"code,omitempty"`
+ Message *ErrorMessage `json:"message,omitempty"`
+ Values *[]ErrorDetail `json:"values,omitempty"`
+}
+
+// ErrorDetail ...
+type ErrorDetail struct {
+ Key *string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// ErrorMessage ...
+type ErrorMessage struct {
+ Lang *string `json:"lang,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// ExitCodeMapping ...
+type ExitCodeMapping struct {
+ Code *int32 `json:"code,omitempty"`
+ ExitOptions *ExitOptions `json:"exitOptions,omitempty"`
+}
+
+// ExitCodeRangeMapping ...
+type ExitCodeRangeMapping struct {
+ Start *int32 `json:"start,omitempty"`
+ End *int32 `json:"end,omitempty"`
+ ExitOptions *ExitOptions `json:"exitOptions,omitempty"`
+}
+
+// ExitConditions ...
+type ExitConditions struct {
+ ExitCodes *[]ExitCodeMapping `json:"exitCodes,omitempty"`
+ ExitCodeRanges *[]ExitCodeRangeMapping `json:"exitCodeRanges,omitempty"`
+ PreProcessingError *ExitOptions `json:"preProcessingError,omitempty"`
+ // FileUploadError - If the Task exited with an exit code that was specified via exitCodes or exitCodeRanges, and then encountered a file upload error, then the action specified by the exit code takes precedence.
+ FileUploadError *ExitOptions `json:"fileUploadError,omitempty"`
+ // Default - This value is used if the Task exits with any nonzero exit code not listed in the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. If you want non-default behavior on exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges collection.
+ Default *ExitOptions `json:"default,omitempty"`
+}
+
+// ExitOptions ...
+type ExitOptions struct {
+ // JobAction - The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). Possible values include: 'JobActionNone', 'JobActionDisable', 'JobActionTerminate'
+ JobAction JobAction `json:"jobAction,omitempty"`
+ // DependencyAction - Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. Possible values include: 'Satisfy', 'Block'
+ DependencyAction DependencyAction `json:"dependencyAction,omitempty"`
+}
+
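+// Illustrative sketch only: a single exit code and a range of exit codes are
+// mapped to explicit actions, and anything else falls through to Default, as
+// described in the comments above. The constant names are taken from the
+// "Possible values include" comments; the specific codes are placeholders.
+var exampleExitConditions = ExitConditions{
+	ExitCodes: &[]ExitCodeMapping{{
+		Code:        to.Int32Ptr(3),
+		ExitOptions: &ExitOptions{JobAction: JobActionNone, DependencyAction: Satisfy},
+	}},
+	ExitCodeRanges: &[]ExitCodeRangeMapping{{
+		Start:       to.Int32Ptr(100),
+		End:         to.Int32Ptr(110),
+		ExitOptions: &ExitOptions{JobAction: JobActionDisable},
+	}},
+	Default: &ExitOptions{JobAction: JobActionTerminate, DependencyAction: Block},
+}
+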
+// FileProperties ...
+type FileProperties struct {
+ // CreationTime - The creation time is not returned for files on Linux Compute Nodes.
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+ LastModified *date.Time `json:"lastModified,omitempty"`
+ ContentLength *int64 `json:"contentLength,omitempty"`
+ ContentType *string `json:"contentType,omitempty"`
+ // FileMode - The file mode is returned only for files on Linux Compute Nodes.
+ FileMode *string `json:"fileMode,omitempty"`
+}
+
+// ImageInformation ...
+type ImageInformation struct {
+ NodeAgentSKUID *string `json:"nodeAgentSKUId,omitempty"`
+ ImageReference *ImageReference `json:"imageReference,omitempty"`
+ // OsType - Possible values include: 'Linux', 'Windows'
+ OsType OSType `json:"osType,omitempty"`
+ // Capabilities - Not every capability of the Image is listed. Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service.
+ Capabilities *[]string `json:"capabilities,omitempty"`
+ BatchSupportEndOfLife *date.Time `json:"batchSupportEndOfLife,omitempty"`
+ // VerificationType - Possible values include: 'Verified', 'Unverified'
+ VerificationType VerificationType `json:"verificationType,omitempty"`
+}
+
+// ImageReference ...
+type ImageReference struct {
+ // Publisher - For example, Canonical or MicrosoftWindowsServer.
+ Publisher *string `json:"publisher,omitempty"`
+ // Offer - For example, UbuntuServer or WindowsServer.
+ Offer *string `json:"offer,omitempty"`
+ // Sku - For example, 18.04-LTS or 2019-Datacenter.
+ Sku *string `json:"sku,omitempty"`
+ // Version - A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'.
+ Version *string `json:"version,omitempty"`
+	// VirtualMachineImageID - This property is mutually exclusive with other ImageReference properties. For a Virtual Machine Image it must be in the same region and subscription as the Azure Batch account. For a SIG Image it must have replicas in the same region as the Azure Batch account. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service, see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.
+ VirtualMachineImageID *string `json:"virtualMachineImageId,omitempty"`
+}
+
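+// Illustrative sketch only: a marketplace Image is identified by publisher,
+// offer, sku, and version (values taken from the field comments above), while
+// VirtualMachineImageID is mutually exclusive with those fields. The resource
+// ID segments below are placeholders.
+var (
+	exampleMarketplaceImage = ImageReference{
+		Publisher: to.StringPtr("Canonical"),
+		Offer:     to.StringPtr("UbuntuServer"),
+		Sku:       to.StringPtr("18.04-LTS"),
+		Version:   to.StringPtr("latest"),
+	}
+	exampleCustomImage = ImageReference{
+		VirtualMachineImageID: to.StringPtr("/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.Compute/images/<image-name>"),
+	}
+)
+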
+// InboundEndpoint ...
+type InboundEndpoint struct {
+ Name *string `json:"name,omitempty"`
+ // Protocol - Possible values include: 'TCP', 'UDP'
+ Protocol InboundEndpointProtocol `json:"protocol,omitempty"`
+ PublicIPAddress *string `json:"publicIPAddress,omitempty"`
+ PublicFQDN *string `json:"publicFQDN,omitempty"`
+ FrontendPort *int32 `json:"frontendPort,omitempty"`
+ BackendPort *int32 `json:"backendPort,omitempty"`
+}
+
+// InboundNATPool ...
+type InboundNATPool struct {
+ // Name - The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400.
+ Name *string `json:"name,omitempty"`
+ // Protocol - Possible values include: 'TCP', 'UDP'
+ Protocol InboundEndpointProtocol `json:"protocol,omitempty"`
+ // BackendPort - This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400.
+ BackendPort *int32 `json:"backendPort,omitempty"`
+ // FrontendPortRangeStart - Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400.
+ FrontendPortRangeStart *int32 `json:"frontendPortRangeStart,omitempty"`
+ // FrontendPortRangeEnd - Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400.
+ FrontendPortRangeEnd *int32 `json:"frontendPortRangeEnd,omitempty"`
+ // NetworkSecurityGroupRules - The maximum number of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400.
+ NetworkSecurityGroupRules *[]NetworkSecurityGroupRule `json:"networkSecurityGroupRules,omitempty"`
+}
+
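+// Illustrative sketch only: the backend port avoids the reserved values (22,
+// 3389, 29876, 29877), the frontend range stays outside 50000-55000 and spans
+// at least 40 ports, and NetworkSecurityGroupRules is left nil so the default
+// allow rule applies, per the comments above. Name and ports are placeholders.
+var exampleInboundNATPool = InboundNATPool{
+	Name:                   to.StringPtr("app-endpoint"),
+	Protocol:               TCP,
+	BackendPort:            to.Int32Ptr(8080),
+	FrontendPortRangeStart: to.Int32Ptr(15000),
+	FrontendPortRangeEnd:   to.Int32Ptr(15099),
+}
+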
+// JobAddParameter ...
+type JobAddParameter struct {
+ // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case).
+ ID *string `json:"id,omitempty"`
+ // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ DisplayName *string `json:"displayName,omitempty"`
+ // Priority - Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0.
+ Priority *int32 `json:"priority,omitempty"`
+ // Constraints - The execution constraints for the Job.
+ Constraints *JobConstraints `json:"constraints,omitempty"`
+	// JobManagerTask - If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, for example by deciding what additional Tasks to run, determining when the work is complete, etc. (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task in the system and can perform whatever actions are required for the Job.) For example, a Job Manager Task might download a file specified as a parameter, analyze the contents of that file and submit additional Tasks based on those contents.
+ JobManagerTask *JobManagerTask `json:"jobManagerTask,omitempty"`
+ // JobPreparationTask - If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node.
+ JobPreparationTask *JobPreparationTask `json:"jobPreparationTask,omitempty"`
+ // JobReleaseTask - A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation.
+ JobReleaseTask *JobReleaseTask `json:"jobReleaseTask,omitempty"`
+ // CommonEnvironmentSettings - Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value.
+ CommonEnvironmentSettings *[]EnvironmentSetting `json:"commonEnvironmentSettings,omitempty"`
+ PoolInfo *PoolInformation `json:"poolInfo,omitempty"`
+ // OnAllTasksComplete - Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. Possible values include: 'NoAction', 'TerminateJob'
+ OnAllTasksComplete OnAllTasksComplete `json:"onAllTasksComplete,omitempty"`
+ // OnTaskFailure - A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. Possible values include: 'OnTaskFailureNoAction', 'OnTaskFailurePerformExitOptionsJobAction'
+ OnTaskFailure OnTaskFailure `json:"onTaskFailure,omitempty"`
+ // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+ UsesTaskDependencies *bool `json:"usesTaskDependencies,omitempty"`
+ NetworkConfiguration *JobNetworkConfiguration `json:"networkConfiguration,omitempty"`
+}
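+
+// exampleAddJobWithAutoTermination is an illustrative sketch, not generated code: it follows the
+// pattern described for onAllTasksComplete above, creating the Job with the NoAction constant and
+// switching to TerminateJob (via a later patch or update) only after all Tasks have been added. The
+// ID and priority are arbitrary placeholder values; the Pool information is left to the caller.
+func exampleAddJobWithAutoTermination(pool *PoolInformation) JobAddParameter {
+ return JobAddParameter{
+ ID:                 to.StringPtr("my-job"), // placeholder ID
+ Priority:           to.Int32Ptr(0),
+ PoolInfo:           pool,
+ OnAllTasksComplete: NoAction, // flip to TerminateJob once all Tasks are added
+ }
+}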
+
+// JobConstraints ...
+type JobConstraints struct {
+ // MaxWallClockTime - If the Job does not complete within the time limit, the Batch service terminates it and any Tasks that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. If this property is not specified, there is no time limit on how long the Job may run.
+ MaxWallClockTime *string `json:"maxWallClockTime,omitempty"`
+ // MaxTaskRetryCount - Note that this value specifically controls the number of retries. The Batch service will try each Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry Tasks. If the maximum retry count is -1, the Batch service retries Tasks without limit. The default value is 0 (no retries).
+ MaxTaskRetryCount *int32 `json:"maxTaskRetryCount,omitempty"`
+}
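+
+// exampleJobConstraints is an illustrative sketch, not generated code, of the retry semantics
+// described above: a MaxTaskRetryCount of 3 allows one initial attempt plus up to 3 retries (4
+// attempts in total). The one-hour wall clock limit is an arbitrary ISO 8601 duration.
+func exampleJobConstraints() *JobConstraints {
+ return &JobConstraints{
+ MaxWallClockTime:  to.StringPtr("PT1H"),
+ MaxTaskRetryCount: to.Int32Ptr(3),
+ }
+}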
+
+// JobDisableParameter ...
+type JobDisableParameter struct {
+ // DisableTasks - Possible values include: 'DisableJobOptionRequeue', 'DisableJobOptionTerminate', 'DisableJobOptionWait'
+ DisableTasks DisableJobOption `json:"disableTasks,omitempty"`
+}
+
+// JobExecutionInformation ...
+type JobExecutionInformation struct {
+ // StartTime - This is the time at which the Job was created.
+ StartTime *date.Time `json:"startTime,omitempty"`
+ // EndTime - This property is set only if the Job is in the completed state.
+ EndTime *date.Time `json:"endTime,omitempty"`
+ // PoolID - This element contains the actual Pool where the Job is assigned. When you get Job details from the service, they also contain a poolInfo element, which contains the Pool configuration data from when the Job was added or updated. That poolInfo element may also contain a poolId element. If it does, the two IDs are the same. If it does not, it means the Job ran on an auto Pool, and this property contains the ID of that auto Pool.
+ PoolID *string `json:"poolId,omitempty"`
+ // SchedulingError - This property is not set if there was no error starting the Job.
+ SchedulingError *JobSchedulingError `json:"schedulingError,omitempty"`
+ // TerminateReason - This property is set only if the Job is in the completed state. If the Batch service terminates the Job, it sets the reason as follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a schedule, and the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in the Job failed with an exit condition that specified a jobAction of terminatejob. Any other string is a user-defined reason specified in a call to the 'Terminate a Job' operation.
+ TerminateReason *string `json:"terminateReason,omitempty"`
+}
+
+// JobManagerTask the Job Manager Task is automatically started when the Job is created. The Batch service
+// tries to schedule the Job Manager Task before any other Tasks in the Job. When shrinking a Pool, the
+// Batch service tries to preserve Nodes where Job Manager Tasks are running for as long as possible (that
+// is, Compute Nodes running 'normal' Tasks are removed before Compute Nodes running Job Manager Tasks).
+// When a Job Manager Task fails and needs to be restarted, the system tries to schedule it at the highest
+// priority. If there are no idle Compute Nodes available, the system may terminate one of the running
+// Tasks in the Pool and return it to the queue in order to make room for the Job Manager Task to restart.
+// Note that a Job Manager Task in one Job does not have priority over Tasks in other Jobs. Across Jobs,
+// only Job level priorities are observed. For example, if a Job Manager in a priority 0 Job needs to be
+// restarted, it will not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery
+// operation is triggered on a Node. Examples of recovery operations include (but are not limited to) when
+// an unhealthy Node is rebooted or a Compute Node disappears due to host failure. Retries due to recovery
+// operations are independent of and are not counted against the maxTaskRetryCount. Even if the
+// maxTaskRetryCount is 0, an internal retry due to a recovery operation may occur. Because of this, all
+// Tasks should be idempotent. This means Tasks need to tolerate being interrupted and restarted without
+// causing any corruption or duplicate data. The best practice for long running Tasks is to use some form
+// of checkpointing.
+type JobManagerTask struct {
+ // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters.
+ ID *string `json:"id,omitempty"`
+ // DisplayName - It need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ DisplayName *string `json:"displayName,omitempty"`
+ // CommandLine - The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
+ CommandLine *string `json:"commandLine,omitempty"`
+ // ContainerSettings - If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.
+ ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"`
+ // ResourceFiles - Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.
+ ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"`
+ // OutputFiles - For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed.
+ OutputFiles *[]OutputFile `json:"outputFiles,omitempty"`
+ EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"`
+ Constraints *TaskConstraints `json:"constraints,omitempty"`
+ // KillJobOnCompletion - If true, when the Job Manager Task completes, the Batch service marks the Job as complete. If any Tasks are still running at this time (other than Job Release), those Tasks are terminated. If false, the completion of the Job Manager Task does not affect the Job status. In this case, you should either use the onAllTasksComplete attribute to terminate the Job, or have a client or user terminate the Job explicitly. An example of this is if the Job Manager creates a set of Tasks but then takes no further role in their execution. The default value is true. If you are using the onAllTasksComplete and onTaskFailure attributes to control Job lifetime, and using the Job Manager Task only to create the Tasks for the Job (not to monitor progress), then it is important to set killJobOnCompletion to false.
+ KillJobOnCompletion *bool `json:"killJobOnCompletion,omitempty"`
+ // UserIdentity - If omitted, the Task runs as a non-administrative user unique to the Task.
+ UserIdentity *UserIdentity `json:"userIdentity,omitempty"`
+ // RunExclusive - If true, no other Tasks will run on the same Node for as long as the Job Manager is running. If false, other Tasks can run simultaneously with the Job Manager on a Compute Node. The Job Manager Task counts normally against the Compute Node's concurrent Task limit, so this is only relevant if the Compute Node allows multiple concurrent Tasks. The default value is true.
+ RunExclusive *bool `json:"runExclusive,omitempty"`
+ // ApplicationPackageReferences - Application Packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced Application Package is already on the Compute Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Application Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"`
+ // AuthenticationTokenSettings - If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job.
+ AuthenticationTokenSettings *AuthenticationTokenSettings `json:"authenticationTokenSettings,omitempty"`
+ // AllowLowPriorityNode - The default value is true.
+ AllowLowPriorityNode *bool `json:"allowLowPriorityNode,omitempty"`
+}
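+
+// exampleJobManagerTask is an illustrative sketch, not generated code: as the killJobOnCompletion
+// documentation above notes, when onAllTasksComplete/onTaskFailure drive the Job lifetime and the
+// Job Manager only submits Tasks, KillJobOnCompletion should be false so its exit does not end the
+// Job. The ID and command line are arbitrary placeholder values.
+func exampleJobManagerTask() *JobManagerTask {
+ return &JobManagerTask{
+ ID:                  to.StringPtr("jobmanager"),
+ CommandLine:         to.StringPtr("/bin/sh -c 'submit_tasks.sh'"), // invoke a shell explicitly
+ KillJobOnCompletion: to.BoolPtr(false),
+ RunExclusive:        to.BoolPtr(false),
+ }
+}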
+
+// JobNetworkConfiguration ...
+type JobNetworkConfiguration struct {
+ // SubnetID - The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration
+ SubnetID *string `json:"subnetId,omitempty"`
+}
+
+// JobPatchParameter ...
+type JobPatchParameter struct {
+ // Priority - Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged.
+ Priority *int32 `json:"priority,omitempty"`
+ // OnAllTasksComplete - If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). Possible values include: 'NoAction', 'TerminateJob'
+ OnAllTasksComplete OnAllTasksComplete `json:"onAllTasksComplete,omitempty"`
+ // Constraints - If omitted, the existing execution constraints are left unchanged.
+ Constraints *JobConstraints `json:"constraints,omitempty"`
+ // PoolInfo - You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool.
+ PoolInfo *PoolInformation `json:"poolInfo,omitempty"`
+ // Metadata - If omitted, the existing Job metadata is left unchanged.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+}
+
+// JobPreparationAndReleaseTaskExecutionInformation ...
+type JobPreparationAndReleaseTaskExecutionInformation struct {
+ PoolID *string `json:"poolId,omitempty"`
+ NodeID *string `json:"nodeId,omitempty"`
+ NodeURL *string `json:"nodeUrl,omitempty"`
+ JobPreparationTaskExecutionInfo *JobPreparationTaskExecutionInformation `json:"jobPreparationTaskExecutionInfo,omitempty"`
+ // JobReleaseTaskExecutionInfo - This property is set only if the Job Release Task has run on the Compute Node.
+ JobReleaseTaskExecutionInfo *JobReleaseTaskExecutionInformation `json:"jobReleaseTaskExecutionInfo,omitempty"`
+}
+
+// JobPreparationTask you can use Job Preparation to prepare a Node to run Tasks for the Job. Activities
+// commonly performed in Job Preparation include downloading common resource files used by all the Tasks
+// in the Job (the Job Preparation Task can download these common resource files to the shared location on
+// the Node, AZ_BATCH_NODE_ROOT_DIR\shared) or starting a local service on the Node so that all Tasks of
+// that Job can communicate with it. If the Job Preparation Task fails (that is, exhausts its retry count
+// before exiting with exit code 0), Batch will not run Tasks of this Job on the Node. The Compute Node
+// remains ineligible to run Tasks of this Job until it is reimaged. The Compute Node remains active and
+// can be used for other Jobs. The Job Preparation Task can run multiple times on the same Node. Therefore,
+// you should write the Job Preparation Task to handle re-execution. If the Node is rebooted, the Job
+// Preparation Task is run again on the Compute Node before scheduling any other Task of the Job, if
+// rerunOnNodeRebootAfterSuccess is true or if the Job Preparation Task did not previously complete. If the
+// Node is reimaged, the Job Preparation Task is run again before scheduling any Task of the Job. Batch
+// will retry Tasks when a recovery operation is triggered on a Node. Examples of recovery operations
+// include (but are not limited to) when an unhealthy Node is rebooted or a Compute Node disappears due to
+// host failure. Retries due to recovery operations are independent of and are not counted against the
+// maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to a recovery operation may
+// occur. Because of this, all Tasks should be idempotent. This means Tasks need to tolerate being
+// interrupted and restarted without causing any corruption or duplicate data. The best practice for long
+// running Tasks is to use some form of checkpointing.
+type JobPreparationTask struct {
+ // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict).
+ ID *string `json:"id,omitempty"`
+ // CommandLine - The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
+ CommandLine *string `json:"commandLine,omitempty"`
+ // ContainerSettings - When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.
+ ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"`
+ // ResourceFiles - Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.
+ ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"`
+ EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"`
+ Constraints *TaskConstraints `json:"constraints,omitempty"`
+ // WaitForSuccess - If true and the Job Preparation Task fails on a Node, the Batch service retries the Job Preparation Task up to its maximum retry count (as specified in the constraints element). If the Task has still not completed successfully after all retries, then the Batch service will not schedule Tasks of the Job to the Node. The Node remains active and eligible to run Tasks of other Jobs. If false, the Batch service will not wait for the Job Preparation Task to complete. In this case, other Tasks of the Job can start executing on the Compute Node while the Job Preparation Task is still running; and even if the Job Preparation Task fails, new Tasks will continue to be scheduled on the Compute Node. The default value is true.
+ WaitForSuccess *bool `json:"waitForSuccess,omitempty"`
+ // UserIdentity - If omitted, the Task runs as a non-administrative user unique to the Task on Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux Compute Nodes.
+ UserIdentity *UserIdentity `json:"userIdentity,omitempty"`
+ // RerunOnNodeRebootAfterSuccess - The Job Preparation Task is always rerun if a Compute Node is reimaged, or if the Job Preparation Task did not complete (e.g. because the reboot occurred while the Task was running). Therefore, you should always write a Job Preparation Task to be idempotent and to behave correctly if run multiple times. The default value is true.
+ RerunOnNodeRebootAfterSuccess *bool `json:"rerunOnNodeRebootAfterSuccess,omitempty"`
+}
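+
+// exampleJobPreparationTask is an illustrative sketch, not generated code: because the Job
+// Preparation Task can run more than once on the same Node (reboots, reimages, recovery
+// operations), the command it runs should be idempotent. The command line is an arbitrary
+// placeholder; WaitForSuccess keeps other Tasks off the Node until preparation succeeds.
+func exampleJobPreparationTask() *JobPreparationTask {
+ return &JobPreparationTask{
+ CommandLine:                   to.StringPtr("/bin/sh -c 'setup.sh'"),
+ WaitForSuccess:                to.BoolPtr(true),
+ RerunOnNodeRebootAfterSuccess: to.BoolPtr(true),
+ }
+}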
+
+// JobPreparationTaskExecutionInformation ...
+type JobPreparationTaskExecutionInformation struct {
+ // StartTime - If the Task has been restarted or retried, this is the most recent time at which the Task started running.
+ StartTime *date.Time `json:"startTime,omitempty"`
+ // EndTime - This property is set only if the Task is in the Completed state.
+ EndTime *date.Time `json:"endTime,omitempty"`
+ // State - Possible values include: 'JobPreparationTaskStateRunning', 'JobPreparationTaskStateCompleted'
+ State JobPreparationTaskState `json:"state,omitempty"`
+ TaskRootDirectory *string `json:"taskRootDirectory,omitempty"`
+ TaskRootDirectoryURL *string `json:"taskRootDirectoryUrl,omitempty"`
+ // ExitCode - This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated.
+ ExitCode *int32 `json:"exitCode,omitempty"`
+ // ContainerInfo - This property is set only if the Task runs in a container context.
+ ContainerInfo *TaskContainerExecutionInformation `json:"containerInfo,omitempty"`
+ // FailureInfo - This property is set only if the Task is in the completed state and encountered a failure.
+ FailureInfo *TaskFailureInformation `json:"failureInfo,omitempty"`
+ // RetryCount - Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints.
+ RetryCount *int32 `json:"retryCount,omitempty"`
+ // LastRetryTime - This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not.
+ LastRetryTime *date.Time `json:"lastRetryTime,omitempty"`
+ // Result - If the value is 'failed', then the details of the failure can be found in the failureInfo property. Possible values include: 'Success', 'Failure'
+ Result TaskExecutionResult `json:"result,omitempty"`
+}
+
+// JobReleaseTask the Job Release Task runs when the Job ends, because of one of the following: the user
+// calls the Terminate Job API or the Delete Job API while the Job is still active; the Job's maximum wall
+// clock time constraint is reached while the Job is still active; or the Job's Job Manager Task completes
+// and the Job is configured to terminate when the Job Manager completes. The Job Release Task runs on each
+// Node where Tasks of the Job have run and the Job Preparation Task ran and completed. If you reimage a
+// Node after it has run the Job Preparation Task, and the Job ends without any further Tasks of the Job
+// running on that Node (and hence the Job Preparation Task does not re-run), then the Job Release Task
+// does not run on that Compute Node. If a Node reboots while the Job Release Task is still running, the
+// Job Release Task runs again when the Compute Node starts up. The Job is not marked as complete until all
+// Job Release Tasks have completed. The Job Release Task runs in the background. It does not occupy a
+// scheduling slot; that is, it does not count towards the maxTasksPerNode limit specified on the Pool.
+type JobReleaseTask struct {
+ // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict).
+ ID *string `json:"id,omitempty"`
+ // CommandLine - The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
+ CommandLine *string `json:"commandLine,omitempty"`
+ // ContainerSettings - When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.
+ ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"`
+ // ResourceFiles - Files listed under this element are located in the Task's working directory.
+ ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"`
+ EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"`
+ MaxWallClockTime *string `json:"maxWallClockTime,omitempty"`
+ // RetentionTime - The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted.
+ RetentionTime *string `json:"retentionTime,omitempty"`
+ // UserIdentity - If omitted, the Task runs as a non-administrative user unique to the Task.
+ UserIdentity *UserIdentity `json:"userIdentity,omitempty"`
+}
+
+// JobReleaseTaskExecutionInformation ...
+type JobReleaseTaskExecutionInformation struct {
+ // StartTime - If the Task has been restarted or retried, this is the most recent time at which the Task started running.
+ StartTime *date.Time `json:"startTime,omitempty"`
+ // EndTime - This property is set only if the Task is in the Completed state.
+ EndTime *date.Time `json:"endTime,omitempty"`
+ // State - Possible values include: 'JobReleaseTaskStateRunning', 'JobReleaseTaskStateCompleted'
+ State JobReleaseTaskState `json:"state,omitempty"`
+ TaskRootDirectory *string `json:"taskRootDirectory,omitempty"`
+ TaskRootDirectoryURL *string `json:"taskRootDirectoryUrl,omitempty"`
+ // ExitCode - This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated.
+ ExitCode *int32 `json:"exitCode,omitempty"`
+ // ContainerInfo - This property is set only if the Task runs in a container context.
+ ContainerInfo *TaskContainerExecutionInformation `json:"containerInfo,omitempty"`
+ // FailureInfo - This property is set only if the Task is in the completed state and encountered a failure.
+ FailureInfo *TaskFailureInformation `json:"failureInfo,omitempty"`
+ // Result - If the value is 'failed', then the details of the failure can be found in the failureInfo property. Possible values include: 'Success', 'Failure'
+ Result TaskExecutionResult `json:"result,omitempty"`
+}
+
+// JobScheduleAddParameter ...
+type JobScheduleAddParameter struct {
+ // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case).
+ ID *string `json:"id,omitempty"`
+ // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ DisplayName *string `json:"displayName,omitempty"`
+ Schedule *Schedule `json:"schedule,omitempty"`
+ JobSpecification *JobSpecification `json:"jobSpecification,omitempty"`
+ // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+}
+
+// JobScheduleExecutionInformation ...
+type JobScheduleExecutionInformation struct {
+ // NextRunTime - This property is meaningful only if the schedule is in the active state when the time comes around. For example, if the schedule is disabled, no Job will be created at nextRunTime unless the Job is enabled before then.
+ NextRunTime *date.Time `json:"nextRunTime,omitempty"`
+ // RecentJob - This property is present only if at least one Job has run under the schedule.
+ RecentJob *RecentJob `json:"recentJob,omitempty"`
+ // EndTime - This property is set only if the Job Schedule is in the completed state.
+ EndTime *date.Time `json:"endTime,omitempty"`
+}
+
+// JobSchedulePatchParameter ...
+type JobSchedulePatchParameter struct {
+ // Schedule - If you do not specify this element, the existing schedule is left unchanged.
+ Schedule *Schedule `json:"schedule,omitempty"`
+ // JobSpecification - Updates affect only Jobs that are started after the update has taken place. Any currently active Job continues with the older specification.
+ JobSpecification *JobSpecification `json:"jobSpecification,omitempty"`
+ // Metadata - If you do not specify this element, existing metadata is left unchanged.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+}
+
+// JobScheduleStatistics ...
+type JobScheduleStatistics struct {
+ URL *string `json:"url,omitempty"`
+ StartTime *date.Time `json:"startTime,omitempty"`
+ LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"`
+ UserCPUTime *string `json:"userCPUTime,omitempty"`
+ KernelCPUTime *string `json:"kernelCPUTime,omitempty"`
+ // WallClockTime - The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries.
+ WallClockTime *string `json:"wallClockTime,omitempty"`
+ ReadIOps *int64 `json:"readIOps,omitempty"`
+ WriteIOps *int64 `json:"writeIOps,omitempty"`
+ ReadIOGiB *float64 `json:"readIOGiB,omitempty"`
+ WriteIOGiB *float64 `json:"writeIOGiB,omitempty"`
+ NumSucceededTasks *int64 `json:"numSucceededTasks,omitempty"`
+ NumFailedTasks *int64 `json:"numFailedTasks,omitempty"`
+ NumTaskRetries *int64 `json:"numTaskRetries,omitempty"`
+ // WaitTime - This value is only reported in the Account lifetime statistics; it is not included in the Job statistics.
+ WaitTime *string `json:"waitTime,omitempty"`
+}
+
+// JobScheduleUpdateParameter ...
+type JobScheduleUpdateParameter struct {
+ // Schedule - If you do not specify this element, it is equivalent to passing the default schedule: that is, a single Job scheduled to run immediately.
+ Schedule *Schedule `json:"schedule,omitempty"`
+ // JobSpecification - Updates affect only Jobs that are started after the update has taken place. Any currently active Job continues with the older specification.
+ JobSpecification *JobSpecification `json:"jobSpecification,omitempty"`
+ // Metadata - If you do not specify this element, it takes the default value of an empty list; in effect, any existing metadata is deleted.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+}
+
+// JobSchedulingError ...
+type JobSchedulingError struct {
+ // Category - Possible values include: 'UserError', 'ServerError'
+ Category ErrorCategory `json:"category,omitempty"`
+ Code *string `json:"code,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Details *[]NameValuePair `json:"details,omitempty"`
+}
+
+// JobSpecification ...
+type JobSpecification struct {
+ // Priority - Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created by using the update Job API.
+ Priority *int32 `json:"priority,omitempty"`
+ // DisplayName - The name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ DisplayName *string `json:"displayName,omitempty"`
+ UsesTaskDependencies *bool `json:"usesTaskDependencies,omitempty"`
+ // OnAllTasksComplete - Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. Possible values include: 'NoAction', 'TerminateJob'
+ OnAllTasksComplete OnAllTasksComplete `json:"onAllTasksComplete,omitempty"`
+ // OnTaskFailure - The default is noaction. Possible values include: 'OnTaskFailureNoAction', 'OnTaskFailurePerformExitOptionsJobAction'
+ OnTaskFailure OnTaskFailure `json:"onTaskFailure,omitempty"`
+ NetworkConfiguration *JobNetworkConfiguration `json:"networkConfiguration,omitempty"`
+ Constraints *JobConstraints `json:"constraints,omitempty"`
+ // JobManagerTask - If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job using the Task API. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job.
+ JobManagerTask *JobManagerTask `json:"jobManagerTask,omitempty"`
+ // JobPreparationTask - If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node.
+ JobPreparationTask *JobPreparationTask `json:"jobPreparationTask,omitempty"`
+ // JobReleaseTask - The primary purpose of the Job Release Task is to undo changes to Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Compute Nodes that have run the Job Preparation Task.
+ JobReleaseTask *JobReleaseTask `json:"jobReleaseTask,omitempty"`
+ // CommonEnvironmentSettings - Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value.
+ CommonEnvironmentSettings *[]EnvironmentSetting `json:"commonEnvironmentSettings,omitempty"`
+ PoolInfo *PoolInformation `json:"poolInfo,omitempty"`
+ // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+}
+
+// JobStatistics ...
+type JobStatistics struct {
+ autorest.Response `json:"-"`
+ URL *string `json:"url,omitempty"`
+ StartTime *date.Time `json:"startTime,omitempty"`
+ LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"`
+ UserCPUTime *string `json:"userCPUTime,omitempty"`
+ KernelCPUTime *string `json:"kernelCPUTime,omitempty"`
+ // WallClockTime - The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries.
+ WallClockTime *string `json:"wallClockTime,omitempty"`
+ ReadIOps *int64 `json:"readIOps,omitempty"`
+ WriteIOps *int64 `json:"writeIOps,omitempty"`
+ ReadIOGiB *float64 `json:"readIOGiB,omitempty"`
+ WriteIOGiB *float64 `json:"writeIOGiB,omitempty"`
+ // NumSucceededTasks - A Task completes successfully if it returns exit code 0.
+ NumSucceededTasks *int64 `json:"numSucceededTasks,omitempty"`
+ // NumFailedTasks - A Task fails if it exhausts its maximum retry count without returning exit code 0.
+ NumFailedTasks *int64 `json:"numFailedTasks,omitempty"`
+ NumTaskRetries *int64 `json:"numTaskRetries,omitempty"`
+ // WaitTime - The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics.
+ WaitTime *string `json:"waitTime,omitempty"`
+}
+
+// JobTerminateParameter ...
+type JobTerminateParameter struct {
+ TerminateReason *string `json:"terminateReason,omitempty"`
+}
+
+// JobUpdateParameter ...
+type JobUpdateParameter struct {
+ // Priority - Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, it is set to the default value 0.
+ Priority *int32 `json:"priority,omitempty"`
+ // Constraints - If omitted, the constraints are cleared.
+ Constraints *JobConstraints `json:"constraints,omitempty"`
+ // PoolInfo - You may change the Pool for a Job only when the Job is disabled. The Update Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal).
+ PoolInfo *PoolInformation `json:"poolInfo,omitempty"`
+ // Metadata - If omitted, it takes the default value of an empty list; in effect, any existing metadata is deleted.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+ // OnAllTasksComplete - If omitted in a PUT request, it is equivalent to passing noaction; this is an error if the current value is terminatejob, because a Job's completion behavior may not be changed from terminatejob to noaction. Once you have engaged automatic Job termination, you cannot turn it off again; if you try, the request fails with an 'invalid property value' error response and Batch returns HTTP status code 400 (Bad Request). Possible values include: 'NoAction', 'TerminateJob'
+ OnAllTasksComplete OnAllTasksComplete `json:"onAllTasksComplete,omitempty"`
+}
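+
+// exampleJobUpdate is an illustrative sketch, not generated code, of the PUT semantics described
+// above: unlike JobPatchParameter, any element omitted from JobUpdateParameter is reset to its
+// default (for example, omitting Metadata deletes all existing metadata), so callers should re-send
+// every value they want to keep. The priority is a placeholder; the pool and metadata are assumed to
+// be the Job's current values.
+func exampleJobUpdate(currentPool *PoolInformation, currentMetadata *[]MetadataItem) JobUpdateParameter {
+ return JobUpdateParameter{
+ Priority: to.Int32Ptr(100),
+ PoolInfo: currentPool,
+ Metadata: currentMetadata, // omit this and any existing metadata is deleted
+ }
+}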
+
+// LinuxUserConfiguration ...
+type LinuxUserConfiguration struct {
+ // UID - The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the uid.
+ UID *int32 `json:"uid,omitempty"`
+ // Gid - The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the gid.
+ Gid *int32 `json:"gid,omitempty"`
+ // SSHPrivateKey - The private key must not be password protected. The private key is used to automatically configure asymmetric-key based authentication for SSH between Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key pair into the user's .ssh directory. If not specified, password-less SSH is not configured between Compute Nodes (no modification of the user's .ssh directory is done).
+ SSHPrivateKey *string `json:"sshPrivateKey,omitempty"`
+}
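+
+// exampleLinuxUser is an illustrative sketch, not generated code: as noted above, UID and Gid must
+// be supplied together (or both omitted, letting the operating system pick them). The numeric IDs
+// are arbitrary placeholder values.
+func exampleLinuxUser() *LinuxUserConfiguration {
+ return &LinuxUserConfiguration{
+ UID: to.Int32Ptr(1001),
+ Gid: to.Int32Ptr(1001),
+ }
+}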
+
+// MetadataItem the Batch service does not assign any meaning to this metadata; it is solely for the use of
+// user code.
+type MetadataItem struct {
+ Name *string `json:"name,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// MountConfiguration ...
+type MountConfiguration struct {
+ // AzureBlobFileSystemConfiguration - This property is mutually exclusive with all other properties.
+ AzureBlobFileSystemConfiguration *AzureBlobFileSystemConfiguration `json:"azureBlobFileSystemConfiguration,omitempty"`
+ // NfsMountConfiguration - This property is mutually exclusive with all other properties.
+ NfsMountConfiguration *NFSMountConfiguration `json:"nfsMountConfiguration,omitempty"`
+ // CifsMountConfiguration - This property is mutually exclusive with all other properties.
+ CifsMountConfiguration *CIFSMountConfiguration `json:"cifsMountConfiguration,omitempty"`
+ // AzureFileShareConfiguration - This property is mutually exclusive with all other properties.
+ AzureFileShareConfiguration *AzureFileShareConfiguration `json:"azureFileShareConfiguration,omitempty"`
+}
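+
+// exampleMountConfiguration is an illustrative sketch, not generated code: the four *Configuration
+// properties are mutually exclusive, so exactly one (an NFS mount here) is set. The source and
+// relative mount path are placeholder values; the share is surfaced under AZ_BATCH_NODE_MOUNTS_DIR.
+func exampleMountConfiguration() MountConfiguration {
+ return MountConfiguration{
+ NfsMountConfiguration: &NFSMountConfiguration{
+ Source:            to.StringPtr("nfs.example.com:/exports/data"),
+ RelativeMountPath: to.StringPtr("data"),
+ },
+ }
+}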
+
+// MultiInstanceSettings multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, if
+// any of the subtasks fail (for example due to exiting with a non-zero exit code) the entire
+// multi-instance Task fails. The multi-instance Task is then terminated and retried, up to its retry
+// limit.
+type MultiInstanceSettings struct {
+ // NumberOfInstances - If omitted, the default is 1.
+ NumberOfInstances *int32 `json:"numberOfInstances,omitempty"`
+ // CoordinationCommandLine - A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages.
+ CoordinationCommandLine *string `json:"coordinationCommandLine,omitempty"`
+ // CommonResourceFiles - The difference between common resource files and Task resource files is that common resource files are downloaded for all subtasks including the primary, whereas Task resource files are downloaded only for the primary. Also note that these resource files are not downloaded to the Task working directory, but instead are downloaded to the Task root directory (one directory above the working directory). There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.
+ CommonResourceFiles *[]ResourceFile `json:"commonResourceFiles,omitempty"`
+}
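+
+// exampleMultiInstanceSettings is an illustrative sketch, not generated code, for an MPI-style Task
+// as described above: the coordination command line starts a background service on every subtask
+// (including the primary) before the Task command line runs. The instance count and command are
+// arbitrary placeholder values.
+func exampleMultiInstanceSettings() *MultiInstanceSettings {
+ return &MultiInstanceSettings{
+ NumberOfInstances:       to.Int32Ptr(4),
+ CoordinationCommandLine: to.StringPtr("/bin/sh -c 'start_coordination_service.sh'"),
+ }
+}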
+
+// NameValuePair ...
+type NameValuePair struct {
+ Name *string `json:"name,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// NetworkConfiguration the network configuration for a Pool.
+type NetworkConfiguration struct {
+ // SubnetID - The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes, and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools created with cloudServiceConfiguration both ARM and classic virtual networks are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. For Pools created with a cloud service configuration, enable ports 10100, 20100, and 30100. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration
+ SubnetID *string `json:"subnetId,omitempty"`
+ // DynamicVNetAssignmentScope - Possible values include: 'DynamicVNetAssignmentScopeNone', 'DynamicVNetAssignmentScopeJob'
+ DynamicVNetAssignmentScope DynamicVNetAssignmentScope `json:"dynamicVNetAssignmentScope,omitempty"`
+ // EndpointConfiguration - Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property.
+ EndpointConfiguration *PoolEndpointConfiguration `json:"endpointConfiguration,omitempty"`
+ // PublicIPs - The number of IPs specified here limits the maximum size of the Pool - 50 dedicated nodes or 20 low-priority nodes can be allocated for each public IP. For example, a pool needing 150 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}.
+ PublicIPs *[]string `json:"publicIPs,omitempty"`
+}
+
+// NetworkSecurityGroupRule ...
+type NetworkSecurityGroupRule struct {
+ // Priority - Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 3500. If any reserved or duplicate values are provided the request fails with HTTP status code 400.
+ Priority *int32 `json:"priority,omitempty"`
+ // Access - Possible values include: 'Allow', 'Deny'
+ Access NetworkSecurityGroupRuleAccess `json:"access,omitempty"`
+ // SourceAddressPrefix - Valid values are a single IP address (for example 10.10.10.10), an IP subnet (for example 192.168.1.0/24), a default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400.
+ SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"`
+ // SourcePortRanges - Valid values are '*' (for all ports 0 - 65535), a specific port (for example 22), or a port range (for example 100-200). The ports must be in the range of 0 to 65535. Each entry in this collection must not overlap any other entry (either a range or an individual port). If any other values are provided the request fails with HTTP status code 400. The default value is '*'.
+ SourcePortRanges *[]string `json:"sourcePortRanges,omitempty"`
+}
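+
+// exampleNSGRule is an illustrative sketch, not generated code: priorities must be unique within a
+// Pool, lie between 150 and 3500, and lower numbers are evaluated first. This rule uses the Allow
+// constant listed above to admit traffic from a single subnet; the address prefix is a placeholder.
+func exampleNSGRule() NetworkSecurityGroupRule {
+ return NetworkSecurityGroupRule{
+ Priority:            to.Int32Ptr(150),
+ Access:              Allow,
+ SourceAddressPrefix: to.StringPtr("192.168.1.0/24"),
+ SourcePortRanges:    &[]string{"*"},
+ }
+}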
+
+// NFSMountConfiguration ...
+type NFSMountConfiguration struct {
+ Source *string `json:"source,omitempty"`
+ // RelativeMountPath - All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
+ RelativeMountPath *string `json:"relativeMountPath,omitempty"`
+ // MountOptions - These are 'net use' options in Windows and 'mount' options in Linux.
+ MountOptions *string `json:"mountOptions,omitempty"`
+}
+
+// NodeAgentInformation the Batch Compute Node agent is a program that runs on each Compute Node in the
+// Pool and provides Batch capability on the Compute Node.
+type NodeAgentInformation struct {
+ // Version - This version number can be checked against the Compute Node agent release notes located at https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md.
+ Version *string `json:"version,omitempty"`
+ // LastUpdateTime - This is the most recent time that the Compute Node agent was updated to a new version.
+ LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"`
+}
+
+// NodeCounts ...
+type NodeCounts struct {
+ Creating *int32 `json:"creating,omitempty"`
+ Idle *int32 `json:"idle,omitempty"`
+ Offline *int32 `json:"offline,omitempty"`
+ Preempted *int32 `json:"preempted,omitempty"`
+ Rebooting *int32 `json:"rebooting,omitempty"`
+ Reimaging *int32 `json:"reimaging,omitempty"`
+ Running *int32 `json:"running,omitempty"`
+ Starting *int32 `json:"starting,omitempty"`
+ StartTaskFailed *int32 `json:"startTaskFailed,omitempty"`
+ LeavingPool *int32 `json:"leavingPool,omitempty"`
+ Unknown *int32 `json:"unknown,omitempty"`
+ Unusable *int32 `json:"unusable,omitempty"`
+ WaitingForStartTask *int32 `json:"waitingForStartTask,omitempty"`
+ Total *int32 `json:"total,omitempty"`
+}
+
+// NodeDisableSchedulingParameter ...
+type NodeDisableSchedulingParameter struct {
+ // NodeDisableSchedulingOption - The default value is requeue. Possible values include: 'DisableComputeNodeSchedulingOptionRequeue', 'DisableComputeNodeSchedulingOptionTerminate', 'DisableComputeNodeSchedulingOptionTaskCompletion'
+ NodeDisableSchedulingOption DisableComputeNodeSchedulingOption `json:"nodeDisableSchedulingOption,omitempty"`
+}
+
+// NodeFile ...
+type NodeFile struct {
+ Name *string `json:"name,omitempty"`
+ URL *string `json:"url,omitempty"`
+ IsDirectory *bool `json:"isDirectory,omitempty"`
+ Properties *FileProperties `json:"properties,omitempty"`
+}
+
+// NodeFileListResult ...
+type NodeFileListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]NodeFile `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// NodeFileListResultIterator provides access to a complete listing of NodeFile values.
+type NodeFileListResultIterator struct {
+ i int
+ page NodeFileListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *NodeFileListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NodeFileListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *NodeFileListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter NodeFileListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter NodeFileListResultIterator) Response() NodeFileListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter NodeFileListResultIterator) Value() NodeFile {
+ if !iter.page.NotDone() {
+ return NodeFile{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the NodeFileListResultIterator type.
+func NewNodeFileListResultIterator(page NodeFileListResultPage) NodeFileListResultIterator {
+ return NodeFileListResultIterator{page: page}
+}
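+
+// exampleIterateNodeFiles is an illustrative sketch, not generated code, showing how the iterator
+// defined above is intended to be used: NotDone/Value/NextWithContext walk every NodeFile across
+// pages, transparently requesting the next page once the current one is exhausted.
+func exampleIterateNodeFiles(ctx context.Context, iter NodeFileListResultIterator) ([]NodeFile, error) {
+ var files []NodeFile
+ for iter.NotDone() {
+ files = append(files, iter.Value())
+ if err := iter.NextWithContext(ctx); err != nil {
+ return nil, err
+ }
+ }
+ return files, nil
+}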
+
+// IsEmpty returns true if the ListResult contains no values.
+func (nflr NodeFileListResult) IsEmpty() bool {
+ return nflr.Value == nil || len(*nflr.Value) == 0
+}
+
+// nodeFileListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (nflr NodeFileListResult) nodeFileListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if nflr.OdataNextLink == nil || len(to.String(nflr.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(nflr.OdataNextLink)))
+}
+
+// NodeFileListResultPage contains a page of NodeFile values.
+type NodeFileListResultPage struct {
+ fn func(context.Context, NodeFileListResult) (NodeFileListResult, error)
+ nflr NodeFileListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *NodeFileListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NodeFileListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.nflr)
+ if err != nil {
+ return err
+ }
+ page.nflr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *NodeFileListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page NodeFileListResultPage) NotDone() bool {
+ return !page.nflr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page NodeFileListResultPage) Response() NodeFileListResult {
+ return page.nflr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page NodeFileListResultPage) Values() []NodeFile {
+ if page.nflr.IsEmpty() {
+ return nil
+ }
+ return *page.nflr.Value
+}
+
+// Creates a new instance of the NodeFileListResultPage type.
+func NewNodeFileListResultPage(getNextPage func(context.Context, NodeFileListResult) (NodeFileListResult, error)) NodeFileListResultPage {
+ return NodeFileListResultPage{fn: getNextPage}
+}
+
+// NodeRebootParameter ...
+type NodeRebootParameter struct {
+ // NodeRebootOption - The default value is requeue. Possible values include: 'ComputeNodeRebootOptionRequeue', 'ComputeNodeRebootOptionTerminate', 'ComputeNodeRebootOptionTaskCompletion', 'ComputeNodeRebootOptionRetainedData'
+ NodeRebootOption ComputeNodeRebootOption `json:"nodeRebootOption,omitempty"`
+}
+
+// NodeReimageParameter ...
+type NodeReimageParameter struct {
+ // NodeReimageOption - The default value is requeue. Possible values include: 'ComputeNodeReimageOptionRequeue', 'ComputeNodeReimageOptionTerminate', 'ComputeNodeReimageOptionTaskCompletion', 'ComputeNodeReimageOptionRetainedData'
+ NodeReimageOption ComputeNodeReimageOption `json:"nodeReimageOption,omitempty"`
+}
+
+// NodeRemoveParameter ...
+type NodeRemoveParameter struct {
+ NodeList *[]string `json:"nodeList,omitempty"`
+ // ResizeTimeout - The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ ResizeTimeout *string `json:"resizeTimeout,omitempty"`
+ // NodeDeallocationOption - The default value is requeue. Possible values include: 'Requeue', 'Terminate', 'TaskCompletion', 'RetainedData'
+ NodeDeallocationOption ComputeNodeDeallocationOption `json:"nodeDeallocationOption,omitempty"`
+}
+
+// NodeUpdateUserParameter ...
+type NodeUpdateUserParameter struct {
+ // Password - The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed.
+ Password *string `json:"password,omitempty"`
+ // ExpiryTime - If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day.
+ ExpiryTime *date.Time `json:"expiryTime,omitempty"`
+ // SSHPublicKey - The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH public key is removed.
+ SSHPublicKey *string `json:"sshPublicKey,omitempty"`
+}
+
+// OutputFile ...
+type OutputFile struct {
+ // FilePattern - Both relative and absolute paths are supported. Relative paths are relative to the Task working directory. The following wildcards are supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range. Brackets can include a negation to match any character not specified (for example [!abc] matches any character but a, b, or c). If a file name starts with "." it is ignored by default but may be matched by specifying it explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple example: **\*.txt matches any file that does not start with '.' and ends with .txt in the Task working directory or any subdirectory. If the filename contains a wildcard character it can be escaped using brackets (for example abc[*] would match a file named abc*). Note that both \ and / are treated as directory separators on Windows, but only / is on Linux. Environment variables (%var% on Windows or $var on Linux) are expanded prior to the pattern being applied.
+ FilePattern *string `json:"filePattern,omitempty"`
+ Destination *OutputFileDestination `json:"destination,omitempty"`
+ UploadOptions *OutputFileUploadOptions `json:"uploadOptions,omitempty"`
+}
+
+// OutputFileBlobContainerDestination ...
+type OutputFileBlobContainerDestination struct {
+ // Path - If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name.
+ Path *string `json:"path,omitempty"`
+ // ContainerURL - The URL must include a Shared Access Signature (SAS) granting write permissions to the container.
+ ContainerURL *string `json:"containerUrl,omitempty"`
+}
+
+// OutputFileDestination ...
+type OutputFileDestination struct {
+ Container *OutputFileBlobContainerDestination `json:"container,omitempty"`
+}
+
+// OutputFileUploadOptions ...
+type OutputFileUploadOptions struct {
+ // UploadCondition - The default is taskcompletion. Possible values include: 'OutputFileUploadConditionTaskSuccess', 'OutputFileUploadConditionTaskFailure', 'OutputFileUploadConditionTaskCompletion'
+ UploadCondition OutputFileUploadCondition `json:"uploadCondition,omitempty"`
+}
+
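+// Illustrative sketch, not part of the generated model code: an OutputFile that
+// uploads anything matching the pattern to a blob container once the Task
+// completes. The container URL is a placeholder and would normally carry a
+// write-enabled SAS, as described on ContainerURL above.
+func exampleOutputFile() OutputFile {
+    return OutputFile{
+        FilePattern: to.StringPtr("../std*.txt"),
+        Destination: &OutputFileDestination{
+            Container: &OutputFileBlobContainerDestination{
+                ContainerURL: to.StringPtr("https://<account>.blob.core.windows.net/<container>?<sas>"),
+            },
+        },
+        UploadOptions: &OutputFileUploadOptions{
+            UploadCondition: OutputFileUploadConditionTaskCompletion,
+        },
+    }
+}
+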
+// PoolAddParameter ...
+type PoolAddParameter struct {
+ // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case).
+ ID *string `json:"id,omitempty"`
+ // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ DisplayName *string `json:"displayName,omitempty"`
+ // VMSize - For information about available sizes of virtual machines for Cloud Services Pools (pools created with cloudServiceConfiguration), see Sizes for Cloud Services (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and A2V2. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
+ VMSize *string `json:"vmSize,omitempty"`
+ // CloudServiceConfiguration - This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'.
+ CloudServiceConfiguration *CloudServiceConfiguration `json:"cloudServiceConfiguration,omitempty"`
+ // VirtualMachineConfiguration - This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified.
+ VirtualMachineConfiguration *VirtualMachineConfiguration `json:"virtualMachineConfiguration,omitempty"`
+ // ResizeTimeout - This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ ResizeTimeout *string `json:"resizeTimeout,omitempty"`
+ // TargetDedicatedNodes - This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
+ TargetDedicatedNodes *int32 `json:"targetDedicatedNodes,omitempty"`
+ // TargetLowPriorityNodes - This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
+ TargetLowPriorityNodes *int32 `json:"targetLowPriorityNodes,omitempty"`
+ // EnableAutoScale - If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false.
+ EnableAutoScale *bool `json:"enableAutoScale,omitempty"`
+ // AutoScaleFormula - This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/).
+ AutoScaleFormula *string `json:"autoScaleFormula,omitempty"`
+ // AutoScaleEvaluationInterval - The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ AutoScaleEvaluationInterval *string `json:"autoScaleEvaluationInterval,omitempty"`
+ // EnableInterNodeCommunication - Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.
+ EnableInterNodeCommunication *bool `json:"enableInterNodeCommunication,omitempty"`
+ NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"`
+ // StartTask - The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted.
+ StartTask *StartTask `json:"startTask,omitempty"`
+ // CertificateReferences - For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory.
+ CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"`
+ // ApplicationPackageReferences - Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"`
+ // ApplicationLicenses - The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail.
+ ApplicationLicenses *[]string `json:"applicationLicenses,omitempty"`
+ // MaxTasksPerNode - The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the Pool or 256.
+ MaxTasksPerNode *int32 `json:"maxTasksPerNode,omitempty"`
+ // TaskSchedulingPolicy - If not specified, the default is spread.
+ TaskSchedulingPolicy *TaskSchedulingPolicy `json:"taskSchedulingPolicy,omitempty"`
+ UserAccounts *[]UserAccount `json:"userAccounts,omitempty"`
+ // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+ // MountConfiguration - Mount storage using an Azure Files share, NFS, CIFS, or a Blobfuse-based file system.
+ MountConfiguration *[]MountConfiguration `json:"mountConfiguration,omitempty"`
+}
+
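+// Illustrative sketch, not part of the generated model code: a minimal fixed-size
+// PoolAddParameter. Per the field comments above, enableAutoScale=false means
+// targetDedicatedNodes and/or targetLowPriorityNodes must be set, and exactly one
+// of cloudServiceConfiguration / virtualMachineConfiguration must be supplied
+// (omitted here for brevity). The ID and VM size are placeholders.
+func exampleFixedSizePool() PoolAddParameter {
+    return PoolAddParameter{
+        ID:                   to.StringPtr("example-pool"),
+        VMSize:               to.StringPtr("standard_d2_v3"),
+        EnableAutoScale:      to.BoolPtr(false),
+        TargetDedicatedNodes: to.Int32Ptr(2),
+        MaxTasksPerNode:      to.Int32Ptr(1),
+    }
+}
+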
+// PoolEnableAutoScaleParameter ...
+type PoolEnableAutoScaleParameter struct {
+ // AutoScaleFormula - The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling).
+ AutoScaleFormula *string `json:"autoScaleFormula,omitempty"`
+ // AutoScaleEvaluationInterval - The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued.
+ AutoScaleEvaluationInterval *string `json:"autoScaleEvaluationInterval,omitempty"`
+}
+
+// PoolEndpointConfiguration ...
+type PoolEndpointConfiguration struct {
+ // InboundNATPools - The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400.
+ InboundNATPools *[]InboundNATPool `json:"inboundNATPools,omitempty"`
+}
+
+// PoolEvaluateAutoScaleParameter ...
+type PoolEvaluateAutoScaleParameter struct {
+ // AutoScaleFormula - The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling).
+ AutoScaleFormula *string `json:"autoScaleFormula,omitempty"`
+}
+
+// PoolInformation ...
+type PoolInformation struct {
+ // PoolID - You must ensure that the Pool referenced by this property exists. If the Pool does not exist at the time the Batch service tries to schedule a Job, no Tasks for the Job will run until you create a Pool with that id. Note that the Batch service will not reject the Job request; it will simply not run Tasks until the Pool exists. You must specify either the Pool ID or the auto Pool specification, but not both.
+ PoolID *string `json:"poolId,omitempty"`
+ // AutoPoolSpecification - If auto Pool creation fails, the Batch service moves the Job to a completed state, and the Pool creation error is set in the Job's scheduling error property. The Batch service manages the lifetime (both creation and, unless keepAlive is specified, deletion) of the auto Pool. Any user actions that affect the lifetime of the auto Pool while the Job is active will result in unexpected behavior. You must specify either the Pool ID or the auto Pool specification, but not both.
+ AutoPoolSpecification *AutoPoolSpecification `json:"autoPoolSpecification,omitempty"`
+}
+
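+// Illustrative sketch, not part of the generated model code: PoolID and
+// AutoPoolSpecification are mutually exclusive, so a PoolInformation names
+// exactly one of them. The pool ID is a placeholder.
+func examplePoolInformation() PoolInformation {
+    return PoolInformation{
+        PoolID: to.StringPtr("example-pool"),
+        // AutoPoolSpecification is left nil because PoolID is set.
+    }
+}
+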
+// PoolListUsageMetricsResult ...
+type PoolListUsageMetricsResult struct {
+ autorest.Response `json:"-"`
+ Value *[]PoolUsageMetrics `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// PoolListUsageMetricsResultIterator provides access to a complete listing of PoolUsageMetrics values.
+type PoolListUsageMetricsResultIterator struct {
+ i int
+ page PoolListUsageMetricsResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *PoolListUsageMetricsResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolListUsageMetricsResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *PoolListUsageMetricsResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter PoolListUsageMetricsResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter PoolListUsageMetricsResultIterator) Response() PoolListUsageMetricsResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter PoolListUsageMetricsResultIterator) Value() PoolUsageMetrics {
+ if !iter.page.NotDone() {
+ return PoolUsageMetrics{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewPoolListUsageMetricsResultIterator creates a new instance of the PoolListUsageMetricsResultIterator type.
+func NewPoolListUsageMetricsResultIterator(page PoolListUsageMetricsResultPage) PoolListUsageMetricsResultIterator {
+ return PoolListUsageMetricsResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (plumr PoolListUsageMetricsResult) IsEmpty() bool {
+ return plumr.Value == nil || len(*plumr.Value) == 0
+}
+
+// poolListUsageMetricsResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (plumr PoolListUsageMetricsResult) poolListUsageMetricsResultPreparer(ctx context.Context) (*http.Request, error) {
+ if plumr.OdataNextLink == nil || len(to.String(plumr.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(plumr.OdataNextLink)))
+}
+
+// PoolListUsageMetricsResultPage contains a page of PoolUsageMetrics values.
+type PoolListUsageMetricsResultPage struct {
+ fn func(context.Context, PoolListUsageMetricsResult) (PoolListUsageMetricsResult, error)
+ plumr PoolListUsageMetricsResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *PoolListUsageMetricsResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolListUsageMetricsResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.plumr)
+ if err != nil {
+ return err
+ }
+ page.plumr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *PoolListUsageMetricsResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page PoolListUsageMetricsResultPage) NotDone() bool {
+ return !page.plumr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page PoolListUsageMetricsResultPage) Response() PoolListUsageMetricsResult {
+ return page.plumr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page PoolListUsageMetricsResultPage) Values() []PoolUsageMetrics {
+ if page.plumr.IsEmpty() {
+ return nil
+ }
+ return *page.plumr.Value
+}
+
+// NewPoolListUsageMetricsResultPage creates a new instance of the PoolListUsageMetricsResultPage type.
+func NewPoolListUsageMetricsResultPage(getNextPage func(context.Context, PoolListUsageMetricsResult) (PoolListUsageMetricsResult, error)) PoolListUsageMetricsResultPage {
+ return PoolListUsageMetricsResultPage{fn: getNextPage}
+}
+
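+// Illustrative sketch, not part of the generated model code: consuming the flat
+// iterator view instead of paging manually. Obtaining the initial populated page
+// from a client call is assumed and not shown.
+func sumTotalCoreHours(ctx context.Context, page PoolListUsageMetricsResultPage) (float64, error) {
+    var total float64
+    iter := NewPoolListUsageMetricsResultIterator(page)
+    for iter.NotDone() {
+        if m := iter.Value(); m.TotalCoreHours != nil {
+            total += *m.TotalCoreHours
+        }
+        // NextWithContext advances to the next value, fetching a new page as needed.
+        if err := iter.NextWithContext(ctx); err != nil {
+            return total, err
+        }
+    }
+    return total, nil
+}
+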
+// PoolNodeCounts ...
+type PoolNodeCounts struct {
+ PoolID *string `json:"poolId,omitempty"`
+ Dedicated *NodeCounts `json:"dedicated,omitempty"`
+ LowPriority *NodeCounts `json:"lowPriority,omitempty"`
+}
+
+// PoolNodeCountsListResult ...
+type PoolNodeCountsListResult struct {
+ autorest.Response `json:"-"`
+ // Value - A list of Compute Node counts by Pool.
+ Value *[]PoolNodeCounts `json:"value,omitempty"`
+ OdataNextLink *string `json:"odata.nextLink,omitempty"`
+}
+
+// PoolNodeCountsListResultIterator provides access to a complete listing of PoolNodeCounts values.
+type PoolNodeCountsListResultIterator struct {
+ i int
+ page PoolNodeCountsListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *PoolNodeCountsListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolNodeCountsListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *PoolNodeCountsListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter PoolNodeCountsListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter PoolNodeCountsListResultIterator) Response() PoolNodeCountsListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter PoolNodeCountsListResultIterator) Value() PoolNodeCounts {
+ if !iter.page.NotDone() {
+ return PoolNodeCounts{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewPoolNodeCountsListResultIterator creates a new instance of the PoolNodeCountsListResultIterator type.
+func NewPoolNodeCountsListResultIterator(page PoolNodeCountsListResultPage) PoolNodeCountsListResultIterator {
+ return PoolNodeCountsListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (pnclr PoolNodeCountsListResult) IsEmpty() bool {
+ return pnclr.Value == nil || len(*pnclr.Value) == 0
+}
+
+// poolNodeCountsListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (pnclr PoolNodeCountsListResult) poolNodeCountsListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if pnclr.OdataNextLink == nil || len(to.String(pnclr.OdataNextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(pnclr.OdataNextLink)))
+}
+
+// PoolNodeCountsListResultPage contains a page of PoolNodeCounts values.
+type PoolNodeCountsListResultPage struct {
+ fn func(context.Context, PoolNodeCountsListResult) (PoolNodeCountsListResult, error)
+ pnclr PoolNodeCountsListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *PoolNodeCountsListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolNodeCountsListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.pnclr)
+ if err != nil {
+ return err
+ }
+ page.pnclr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *PoolNodeCountsListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page PoolNodeCountsListResultPage) NotDone() bool {
+ return !page.pnclr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page PoolNodeCountsListResultPage) Response() PoolNodeCountsListResult {
+ return page.pnclr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page PoolNodeCountsListResultPage) Values() []PoolNodeCounts {
+ if page.pnclr.IsEmpty() {
+ return nil
+ }
+ return *page.pnclr.Value
+}
+
+// NewPoolNodeCountsListResultPage creates a new instance of the PoolNodeCountsListResultPage type.
+func NewPoolNodeCountsListResultPage(getNextPage func(context.Context, PoolNodeCountsListResult) (PoolNodeCountsListResult, error)) PoolNodeCountsListResultPage {
+ return PoolNodeCountsListResultPage{fn: getNextPage}
+}
+
+// PoolPatchParameter ...
+type PoolPatchParameter struct {
+ // StartTask - If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged.
+ StartTask *StartTask `json:"startTask,omitempty"`
+ // CertificateReferences - If this element is present, it replaces any existing Certificate references configured on the Pool. If omitted, any existing Certificate references are left unchanged. For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory.
+ CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"`
+ // ApplicationPackageReferences - Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"`
+ // Metadata - If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+}
+
+// PoolResizeParameter ...
+type PoolResizeParameter struct {
+ TargetDedicatedNodes *int32 `json:"targetDedicatedNodes,omitempty"`
+ TargetLowPriorityNodes *int32 `json:"targetLowPriorityNodes,omitempty"`
+ // ResizeTimeout - The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ ResizeTimeout *string `json:"resizeTimeout,omitempty"`
+ // NodeDeallocationOption - The default value is requeue. Possible values include: 'Requeue', 'Terminate', 'TaskCompletion', 'RetainedData'
+ NodeDeallocationOption ComputeNodeDeallocationOption `json:"nodeDeallocationOption,omitempty"`
+}
+
+// PoolSpecification ...
+type PoolSpecification struct {
+ // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ DisplayName *string `json:"displayName,omitempty"`
+ // VMSize - For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
+ VMSize *string `json:"vmSize,omitempty"`
+ // CloudServiceConfiguration - This property must be specified if the Pool needs to be created with Azure PaaS VMs. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'.
+ CloudServiceConfiguration *CloudServiceConfiguration `json:"cloudServiceConfiguration,omitempty"`
+ // VirtualMachineConfiguration - This property must be specified if the Pool needs to be created with Azure IaaS VMs. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. If neither is specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ VirtualMachineConfiguration *VirtualMachineConfiguration `json:"virtualMachineConfiguration,omitempty"`
+ // MaxTasksPerNode - The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the Pool or 256.
+ MaxTasksPerNode *int32 `json:"maxTasksPerNode,omitempty"`
+ // TaskSchedulingPolicy - If not specified, the default is spread.
+ TaskSchedulingPolicy *TaskSchedulingPolicy `json:"taskSchedulingPolicy,omitempty"`
+ // ResizeTimeout - This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ ResizeTimeout *string `json:"resizeTimeout,omitempty"`
+ // TargetDedicatedNodes - This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
+ TargetDedicatedNodes *int32 `json:"targetDedicatedNodes,omitempty"`
+ // TargetLowPriorityNodes - This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
+ TargetLowPriorityNodes *int32 `json:"targetLowPriorityNodes,omitempty"`
+ // EnableAutoScale - If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula element is required. The Pool automatically resizes according to the formula. The default value is false.
+ EnableAutoScale *bool `json:"enableAutoScale,omitempty"`
+ // AutoScaleFormula - This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information.
+ AutoScaleFormula *string `json:"autoScaleFormula,omitempty"`
+ // AutoScaleEvaluationInterval - The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ AutoScaleEvaluationInterval *string `json:"autoScaleEvaluationInterval,omitempty"`
+ // EnableInterNodeCommunication - Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.
+ EnableInterNodeCommunication *bool `json:"enableInterNodeCommunication,omitempty"`
+ NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"`
+ StartTask *StartTask `json:"startTask,omitempty"`
+ // CertificateReferences - For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory.
+ CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"`
+ // ApplicationPackageReferences - Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"`
+ // ApplicationLicenses - The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. The permitted licenses available on the Pool are 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each application license added to the Pool.
+ ApplicationLicenses *[]string `json:"applicationLicenses,omitempty"`
+ UserAccounts *[]UserAccount `json:"userAccounts,omitempty"`
+ // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+ // MountConfiguration - This supports Azure Files, NFS, CIFS/SMB, and Blobfuse.
+ MountConfiguration *[]MountConfiguration `json:"mountConfiguration,omitempty"`
+}
+
+// PoolStatistics ...
+type PoolStatistics struct {
+ autorest.Response `json:"-"`
+ URL *string `json:"url,omitempty"`
+ StartTime *date.Time `json:"startTime,omitempty"`
+ LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"`
+ UsageStats *UsageStatistics `json:"usageStats,omitempty"`
+ ResourceStats *ResourceStatistics `json:"resourceStats,omitempty"`
+}
+
+// PoolUpdatePropertiesParameter ...
+type PoolUpdatePropertiesParameter struct {
+ // StartTask - If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool.
+ StartTask *StartTask `json:"startTask,omitempty"`
+ // CertificateReferences - This list replaces any existing Certificate references configured on the Pool. If you specify an empty collection, any existing Certificate references are removed from the Pool. For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory.
+ CertificateReferences *[]CertificateReference `json:"certificateReferences,omitempty"`
+ // ApplicationPackageReferences - The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Package references are removed from the Pool.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"`
+ // Metadata - This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+}
+
+// PoolUsageMetrics ...
+type PoolUsageMetrics struct {
+ PoolID *string `json:"poolId,omitempty"`
+ StartTime *date.Time `json:"startTime,omitempty"`
+ EndTime *date.Time `json:"endTime,omitempty"`
+ // VMSize - For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
+ VMSize *string `json:"vmSize,omitempty"`
+ TotalCoreHours *float64 `json:"totalCoreHours,omitempty"`
+}
+
+// ReadCloser ...
+type ReadCloser struct {
+ autorest.Response `json:"-"`
+ Value *io.ReadCloser `json:"value,omitempty"`
+}
+
+// RecentJob ...
+type RecentJob struct {
+ ID *string `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+}
+
+// ResizeError ...
+type ResizeError struct {
+ Code *string `json:"code,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Values *[]NameValuePair `json:"values,omitempty"`
+}
+
+// ResourceFile ...
+type ResourceFile struct {
+ // AutoStorageContainerName - The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified.
+ AutoStorageContainerName *string `json:"autoStorageContainerName,omitempty"`
+ // StorageContainerURL - The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable using anonymous access; that is, the Batch service does not present any credentials when downloading blobs from the container. There are two ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, or set the ACL for the container to allow public access.
+ StorageContainerURL *string `json:"storageContainerUrl,omitempty"`
+ // HTTPURL - The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable using anonymous access; that is, the Batch service does not present any credentials when downloading the blob. There are two ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, or set the ACL for the blob or its container to allow public access.
+ HTTPURL *string `json:"httpUrl,omitempty"`
+ // BlobPrefix - The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded.
+ BlobPrefix *string `json:"blobPrefix,omitempty"`
+ // FilePath - If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the Task's working directory (for example by using '..').
+ FilePath *string `json:"filePath,omitempty"`
+ // FileMode - This property applies only to files being downloaded to Linux Compute Nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows Compute Node. If this property is not specified for a Linux Compute Node, then a default value of 0770 is applied to the file.
+ FileMode *string `json:"fileMode,omitempty"`
+}
+
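+// Illustrative sketch, not part of the generated model code: the three source
+// properties are mutually exclusive, so a ResourceFile names exactly one of them.
+// Here a single blob is fetched over httpUrl into the Task working directory; the
+// URL and file name are placeholders.
+func exampleResourceFile() ResourceFile {
+    return ResourceFile{
+        HTTPURL:  to.StringPtr("https://<account>.blob.core.windows.net/<container>/input.txt?<sas>"),
+        FilePath: to.StringPtr("input.txt"),
+    }
+}
+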
+// ResourceStatistics ...
+type ResourceStatistics struct {
+ StartTime *date.Time `json:"startTime,omitempty"`
+ LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"`
+ AvgCPUPercentage *float64 `json:"avgCPUPercentage,omitempty"`
+ AvgMemoryGiB *float64 `json:"avgMemoryGiB,omitempty"`
+ PeakMemoryGiB *float64 `json:"peakMemoryGiB,omitempty"`
+ AvgDiskGiB *float64 `json:"avgDiskGiB,omitempty"`
+ PeakDiskGiB *float64 `json:"peakDiskGiB,omitempty"`
+ DiskReadIOps *int64 `json:"diskReadIOps,omitempty"`
+ DiskWriteIOps *int64 `json:"diskWriteIOps,omitempty"`
+ DiskReadGiB *float64 `json:"diskReadGiB,omitempty"`
+ DiskWriteGiB *float64 `json:"diskWriteGiB,omitempty"`
+ NetworkReadGiB *float64 `json:"networkReadGiB,omitempty"`
+ NetworkWriteGiB *float64 `json:"networkWriteGiB,omitempty"`
+}
+
+// Schedule ...
+type Schedule struct {
+ // DoNotRunUntil - If you do not specify a doNotRunUntil time, the schedule becomes ready to create Jobs immediately.
+ DoNotRunUntil *date.Time `json:"doNotRunUntil,omitempty"`
+ // DoNotRunAfter - If you do not specify a doNotRunAfter time, and you are creating a recurring Job Schedule, the Job Schedule will remain active until you explicitly terminate it.
+ DoNotRunAfter *date.Time `json:"doNotRunAfter,omitempty"`
+ // StartWindow - If a Job is not created within the startWindow interval, then the 'opportunity' is lost; no Job will be created until the next recurrence of the schedule. If the schedule is recurring, and the startWindow is longer than the recurrence interval, then this is equivalent to an infinite startWindow, because the Job that is 'due' in one recurrenceInterval is not carried forward into the next recurrence interval. The default is infinite. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ StartWindow *string `json:"startWindow,omitempty"`
+ // RecurrenceInterval - Because a Job Schedule can have at most one active Job under it at any given time, if it is time to create a new Job under a Job Schedule, but the previous Job is still running, the Batch service will not create the new Job until the previous Job finishes. If the previous Job does not finish within the startWindow period of the new recurrenceInterval, then no new Job will be scheduled for that interval. For recurring Jobs, you should normally specify a jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you will need an external process to monitor when Jobs are created, add Tasks to the Jobs and terminate the Jobs ready for the next recurrence. The default is that the schedule does not recur: one Job is created, within the startWindow after the doNotRunUntil time, and the schedule is complete as soon as that Job finishes. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ RecurrenceInterval *string `json:"recurrenceInterval,omitempty"`
+}
+
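+// Illustrative sketch, not part of the generated model code: a Schedule that
+// creates a Job once an hour with a ten-minute start window. The string-typed
+// intervals are assumed to use the ISO 8601 duration format of the underlying
+// Batch REST API (e.g. "PT1H").
+func exampleHourlySchedule() Schedule {
+    return Schedule{
+        RecurrenceInterval: to.StringPtr("PT1H"),
+        StartWindow:        to.StringPtr("PT10M"),
+    }
+}
+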
+// StartTask batch will retry Tasks when a recovery operation is triggered on a Node. Examples of recovery
+// operations include (but are not limited to) when an unhealthy Node is rebooted or a Compute Node
+// disappeared due to host failure. Retries due to recovery operations are independent of and are not
+// counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to a
+// recovery operation may occur. Because of this, all Tasks should be idempotent. This means Tasks need to
+// tolerate being interrupted and restarted without causing any corruption or duplicate data. The best
+// practice for long running Tasks is to use some form of checkpointing. In some cases the StartTask may be
+// re-run even though the Compute Node was not rebooted. Special care should be taken to avoid StartTasks
+// which create breakaway processes or install/launch services from the StartTask working directory, as this
+// will block Batch from being able to re-run the StartTask.
+type StartTask struct {
+ // CommandLine - The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
+ CommandLine *string `json:"commandLine,omitempty"`
+ // ContainerSettings - When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.
+ ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"`
+ // ResourceFiles - Files listed under this element are located in the Task's working directory.
+ ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"`
+ EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"`
+ // UserIdentity - If omitted, the Task runs as a non-administrative user unique to the Task.
+ UserIdentity *UserIdentity `json:"userIdentity,omitempty"`
+ // MaxTaskRetryCount - The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task. If the maximum retry count is -1, the Batch service retries the Task without limit.
+ MaxTaskRetryCount *int32 `json:"maxTaskRetryCount,omitempty"`
+ // WaitForSuccess - If true and the StartTask fails on a Node, the Batch service retries the StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has still not completed successfully after all retries, then the Batch service marks the Node unusable, and will not schedule Tasks to it. This condition can be detected via the Compute Node state and failure info details. If false, the Batch service will not wait for the StartTask to complete. In this case, other Tasks can start executing on the Compute Node while the StartTask is still running; and even if the StartTask fails, new Tasks will continue to be scheduled on the Compute Node. The default is true.
+ WaitForSuccess *bool `json:"waitForSuccess,omitempty"`
+}
+
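+// Illustrative sketch, not part of the generated model code: a StartTask that
+// runs a shell command as the default non-administrative Task user and blocks
+// scheduling until it succeeds, in line with the field comments above. The
+// command line is a placeholder.
+func exampleStartTask() StartTask {
+    return StartTask{
+        CommandLine:       to.StringPtr("/bin/sh -c 'echo start-task ran'"),
+        WaitForSuccess:    to.BoolPtr(true),
+        MaxTaskRetryCount: to.Int32Ptr(2),
+    }
+}
+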
+// StartTaskInformation ...
+type StartTaskInformation struct {
+ // State - Possible values include: 'StartTaskStateRunning', 'StartTaskStateCompleted'
+ State StartTaskState `json:"state,omitempty"`
+ // StartTime - This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running).
+ StartTime *date.Time `json:"startTime,omitempty"`
+ // EndTime - This is the end time of the most recent run of the StartTask, if that run has completed (even if that run failed and a retry is pending). This element is not present if the StartTask is currently running.
+ EndTime *date.Time `json:"endTime,omitempty"`
+ // ExitCode - This property is set only if the StartTask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the StartTask (due to timeout, or user termination via the API) you may see an operating system-defined exit code.
+ ExitCode *int32 `json:"exitCode,omitempty"`
+ // ContainerInfo - This property is set only if the Task runs in a container context.
+ ContainerInfo *TaskContainerExecutionInformation `json:"containerInfo,omitempty"`
+ // FailureInfo - This property is set only if the Task is in the completed state and encountered a failure.
+ FailureInfo *TaskFailureInformation `json:"failureInfo,omitempty"`
+ // RetryCount - Task application failures (non-zero exit code) are retried; pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints.
+ RetryCount *int32 `json:"retryCount,omitempty"`
+ // LastRetryTime - This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not.
+ LastRetryTime *date.Time `json:"lastRetryTime,omitempty"`
+ // Result - If the value is 'failed', then the details of the failure can be found in the failureInfo property. Possible values include: 'Success', 'Failure'
+ Result TaskExecutionResult `json:"result,omitempty"`
+}
+
+// SubtaskInformation ...
+type SubtaskInformation struct {
+ ID *int32 `json:"id,omitempty"`
+ NodeInfo *ComputeNodeInformation `json:"nodeInfo,omitempty"`
+ StartTime *date.Time `json:"startTime,omitempty"`
+ // EndTime - This property is set only if the subtask is in the Completed state.
+ EndTime *date.Time `json:"endTime,omitempty"`
+ // ExitCode - This property is set only if the subtask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the subtask (due to timeout, or user termination via the API) you may see an operating system-defined exit code.
+ ExitCode *int32 `json:"exitCode,omitempty"`
+ // ContainerInfo - This property is set only if the Task runs in a container context.
+ ContainerInfo *TaskContainerExecutionInformation `json:"containerInfo,omitempty"`
+ // FailureInfo - This property is set only if the Task is in the completed state and encountered a failure.
+ FailureInfo *TaskFailureInformation `json:"failureInfo,omitempty"`
+ // State - Possible values include: 'SubtaskStatePreparing', 'SubtaskStateRunning', 'SubtaskStateCompleted'
+ State SubtaskState `json:"state,omitempty"`
+ StateTransitionTime *date.Time `json:"stateTransitionTime,omitempty"`
+ // PreviousState - This property is not set if the subtask is in its initial running state. Possible values include: 'SubtaskStatePreparing', 'SubtaskStateRunning', 'SubtaskStateCompleted'
+ PreviousState SubtaskState `json:"previousState,omitempty"`
+ // PreviousStateTransitionTime - This property is not set if the subtask is in its initial running state.
+ PreviousStateTransitionTime *date.Time `json:"previousStateTransitionTime,omitempty"`
+ // Result - If the value is 'failed', then the details of the failure can be found in the failureInfo property. Possible values include: 'Success', 'Failure'
+ Result TaskExecutionResult `json:"result,omitempty"`
+}
+
+// TaskAddCollectionParameter ...
+type TaskAddCollectionParameter struct {
+ // Value - The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has hundreds of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried with fewer Tasks.
+ Value *[]TaskAddParameter `json:"value,omitempty"`
+}
+
+// TaskAddCollectionResult ...
+type TaskAddCollectionResult struct {
+ autorest.Response `json:"-"`
+ Value *[]TaskAddResult `json:"value,omitempty"`
+}
+
+// TaskAddParameter batch will retry Tasks when a recovery operation is triggered on a Node. Examples of
+// recovery operations include (but are not limited to) when an unhealthy Node is rebooted or a Compute
+// Node disappeared due to host failure. Retries due to recovery operations are independent of and are not
+// counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to a
+// recovery operation may occur. Because of this, all Tasks should be idempotent. This means Tasks need to
+// tolerate being interrupted and restarted without causing any corruption or duplicate data. The best
+// practice for long running Tasks is to use some form of checkpointing.
+type TaskAddParameter struct {
+ // ID - The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case).
+ ID *string `json:"id,omitempty"`
+ // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ DisplayName *string `json:"displayName,omitempty"`
+ // CommandLine - For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
+ CommandLine *string `json:"commandLine,omitempty"`
+ // ContainerSettings - If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.
+ ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"`
+ // ExitConditions - How the Batch service should respond when the Task completes.
+ ExitConditions *ExitConditions `json:"exitConditions,omitempty"`
+ // ResourceFiles - For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.
+ ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"`
+ // OutputFiles - For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed.
+ OutputFiles *[]OutputFile `json:"outputFiles,omitempty"`
+ EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"`
+ AffinityInfo *AffinityInformation `json:"affinityInfo,omitempty"`
+ // Constraints - If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days.
+ Constraints *TaskConstraints `json:"constraints,omitempty"`
+ // UserIdentity - If omitted, the Task runs as a non-administrative user unique to the Task.
+ UserIdentity *UserIdentity `json:"userIdentity,omitempty"`
+ MultiInstanceSettings *MultiInstanceSettings `json:"multiInstanceSettings,omitempty"`
+ // DependsOn - This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob.
+ DependsOn *TaskDependencies `json:"dependsOn,omitempty"`
+ // ApplicationPackageReferences - Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails.
+ ApplicationPackageReferences *[]ApplicationPackageReference `json:"applicationPackageReferences,omitempty"`
+ // AuthenticationTokenSettings - If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job.
+ AuthenticationTokenSettings *AuthenticationTokenSettings `json:"authenticationTokenSettings,omitempty"`
+}
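+
+// exampleTaskAddParameter is an illustrative sketch (not generated code) of a
+// minimal TaskAddParameter: only the ID and CommandLine fields are populated,
+// and the values shown are placeholders.
+func exampleTaskAddParameter() TaskAddParameter {
+	id := "task-1"
+	cmd := `/bin/sh -c "echo hello"`
+	return TaskAddParameter{
+		ID:          &id,
+		CommandLine: &cmd,
+	}
+}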
+
+// TaskAddResult ...
+type TaskAddResult struct {
+ // Status - Possible values include: 'TaskAddStatusSuccess', 'TaskAddStatusClientError', 'TaskAddStatusServerError'
+ Status TaskAddStatus `json:"status,omitempty"`
+ TaskID *string `json:"taskId,omitempty"`
+ // ETag - You can use this to detect whether the Task has changed between requests. In particular, you can pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Job in the meantime.
+ ETag *string `json:"eTag,omitempty"`
+ LastModified *date.Time `json:"lastModified,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Error *Error `json:"error,omitempty"`
+}
+
+// TaskConstraints ...
+type TaskConstraints struct {
+ // MaxWallClockTime - If this is not specified, there is no time limit on how long the Task may run.
+ MaxWallClockTime *string `json:"maxWallClockTime,omitempty"`
+ // RetentionTime - The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted.
+ RetentionTime *string `json:"retentionTime,omitempty"`
+ // MaxTaskRetryCount - Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit.
+ MaxTaskRetryCount *int32 `json:"maxTaskRetryCount,omitempty"`
+}
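+
+// exampleTaskConstraints is an illustrative sketch (not generated code): with
+// MaxTaskRetryCount set to 3 the service makes up to 4 attempts (one initial
+// try plus 3 retries), per the field documentation above.
+func exampleTaskConstraints() TaskConstraints {
+	maxRetries := int32(3)
+	retention := "P7D" // assumed ISO 8601 duration string; 7 days matches the documented default
+	return TaskConstraints{
+		MaxTaskRetryCount: &maxRetries,
+		RetentionTime:     &retention,
+	}
+}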
+
+// TaskContainerExecutionInformation ...
+type TaskContainerExecutionInformation struct {
+ ContainerID *string `json:"containerId,omitempty"`
+ // State - This is the state of the container according to the Docker service. It is equivalent to the status field returned by "docker inspect".
+ State *string `json:"state,omitempty"`
+ // Error - This is the detailed error string from the Docker service, if available. It is equivalent to the error field returned by "docker inspect".
+ Error *string `json:"error,omitempty"`
+}
+
+// TaskContainerSettings ...
+type TaskContainerSettings struct {
+ // ContainerRunOptions - These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service.
+ ContainerRunOptions *string `json:"containerRunOptions,omitempty"`
+ // ImageName - This is the full Image reference, as would be specified to "docker pull". If no tag is provided as part of the Image name, the tag ":latest" is used as a default.
+ ImageName *string `json:"imageName,omitempty"`
+ // Registry - This setting can be omitted if it was already provided at Pool creation.
+ Registry *ContainerRegistry `json:"registry,omitempty"`
+ // WorkingDirectory - The default is 'taskWorkingDirectory'. Possible values include: 'TaskWorkingDirectory', 'ContainerImageDefault'
+ WorkingDirectory ContainerWorkingDirectory `json:"workingDirectory,omitempty"`
+}
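+
+// exampleTaskContainerSettings is an illustrative sketch (not generated code):
+// an image name without a tag defaults to ":latest" as noted above, so the tag
+// is given explicitly here. The registry is omitted on the assumption that it
+// was already supplied at Pool creation; the image name is a placeholder.
+func exampleTaskContainerSettings() TaskContainerSettings {
+	image := "myregistry.azurecr.io/worker:1.0"
+	return TaskContainerSettings{
+		ImageName:        &image,
+		WorkingDirectory: TaskWorkingDirectory,
+	}
+}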
+
+// TaskCounts ...
+type TaskCounts struct {
+ autorest.Response `json:"-"`
+ Active *int32 `json:"active,omitempty"`
+ Running *int32 `json:"running,omitempty"`
+ Completed *int32 `json:"completed,omitempty"`
+ Succeeded *int32 `json:"succeeded,omitempty"`
+ Failed *int32 `json:"failed,omitempty"`
+}
+
+// TaskDependencies ...
+type TaskDependencies struct {
+ // TaskIds - The taskIds collection is limited to 64000 characters total (i.e. the combined length of all Task IDs). If the taskIds collection exceeds the maximum length, the Add Task request fails with error code TaskDependencyListTooLong. In this case consider using Task ID ranges instead.
+ TaskIds *[]string `json:"taskIds,omitempty"`
+ TaskIDRanges *[]TaskIDRange `json:"taskIdRanges,omitempty"`
+}
+
+// TaskExecutionInformation ...
+type TaskExecutionInformation struct {
+ // StartTime - 'Running' corresponds to the running state, so if the Task specifies resource files or Packages, then the start time reflects the time at which the Task started downloading or deploying these. If the Task has been restarted or retried, this is the most recent time at which the Task started running. This property is present only for Tasks that are in the running or completed state.
+ StartTime *date.Time `json:"startTime,omitempty"`
+ // EndTime - This property is set only if the Task is in the Completed state.
+ EndTime *date.Time `json:"endTime,omitempty"`
+ // ExitCode - This property is set only if the Task is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the Task (due to timeout, or user termination via the API) you may see an operating system-defined exit code.
+ ExitCode *int32 `json:"exitCode,omitempty"`
+ // ContainerInfo - This property is set only if the Task runs in a container context.
+ ContainerInfo *TaskContainerExecutionInformation `json:"containerInfo,omitempty"`
+ // FailureInfo - This property is set only if the Task is in the completed state and encountered a failure.
+ FailureInfo *TaskFailureInformation `json:"failureInfo,omitempty"`
+ // RetryCount - Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints.
+ RetryCount *int32 `json:"retryCount,omitempty"`
+ // LastRetryTime - This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not.
+ LastRetryTime *date.Time `json:"lastRetryTime,omitempty"`
+ // RequeueCount - When the user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is being disabled, the user can specify that running Tasks on the Compute Nodes be requeued for execution. This count tracks how many times the Task has been requeued for these reasons.
+ RequeueCount *int32 `json:"requeueCount,omitempty"`
+ // LastRequeueTime - This property is set only if the requeueCount is nonzero.
+ LastRequeueTime *date.Time `json:"lastRequeueTime,omitempty"`
+ // Result - If the value is 'failed', then the details of the failure can be found in the failureInfo property. Possible values include: 'Success', 'Failure'
+ Result TaskExecutionResult `json:"result,omitempty"`
+}
+
+// TaskFailureInformation ...
+type TaskFailureInformation struct {
+ // Category - Possible values include: 'UserError', 'ServerError'
+ Category ErrorCategory `json:"category,omitempty"`
+ Code *string `json:"code,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Details *[]NameValuePair `json:"details,omitempty"`
+}
+
+// TaskIDRange the start and end of the range are inclusive. For example, if a range has start 9 and end
+// 12, then it represents Tasks '9', '10', '11' and '12'.
+type TaskIDRange struct {
+ Start *int32 `json:"start,omitempty"`
+ End *int32 `json:"end,omitempty"`
+}
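+
+// exampleTaskIDRange is an illustrative sketch (not generated code): because
+// both ends are inclusive, a range with Start 9 and End 12 covers Tasks '9',
+// '10', '11' and '12'.
+func exampleTaskIDRange() TaskIDRange {
+	start, end := int32(9), int32(12)
+	return TaskIDRange{Start: &start, End: &end}
+}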
+
+// TaskInformation ...
+type TaskInformation struct {
+ TaskURL *string `json:"taskUrl,omitempty"`
+ JobID *string `json:"jobId,omitempty"`
+ TaskID *string `json:"taskId,omitempty"`
+ SubtaskID *int32 `json:"subtaskId,omitempty"`
+ // TaskState - Possible values include: 'TaskStateActive', 'TaskStatePreparing', 'TaskStateRunning', 'TaskStateCompleted'
+ TaskState TaskState `json:"taskState,omitempty"`
+ ExecutionInfo *TaskExecutionInformation `json:"executionInfo,omitempty"`
+}
+
+// TaskSchedulingPolicy ...
+type TaskSchedulingPolicy struct {
+ // NodeFillType - If not specified, the default is spread. Possible values include: 'Spread', 'Pack'
+ NodeFillType ComputeNodeFillType `json:"nodeFillType,omitempty"`
+}
+
+// TaskStatistics ...
+type TaskStatistics struct {
+ URL *string `json:"url,omitempty"`
+ StartTime *date.Time `json:"startTime,omitempty"`
+ LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"`
+ UserCPUTime *string `json:"userCPUTime,omitempty"`
+ KernelCPUTime *string `json:"kernelCPUTime,omitempty"`
+ // WallClockTime - The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries.
+ WallClockTime *string `json:"wallClockTime,omitempty"`
+ ReadIOps *int64 `json:"readIOps,omitempty"`
+ WriteIOps *int64 `json:"writeIOps,omitempty"`
+ ReadIOGiB *float64 `json:"readIOGiB,omitempty"`
+ WriteIOGiB *float64 `json:"writeIOGiB,omitempty"`
+ WaitTime *string `json:"waitTime,omitempty"`
+}
+
+// TaskUpdateParameter ...
+type TaskUpdateParameter struct {
+ // Constraints - If omitted, the Task is given the default constraints. For multi-instance Tasks, updating the retention time applies only to the primary Task and not subtasks.
+ Constraints *TaskConstraints `json:"constraints,omitempty"`
+}
+
+// UploadBatchServiceLogsConfiguration ...
+type UploadBatchServiceLogsConfiguration struct {
+ // ContainerURL - The URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified.
+ ContainerURL *string `json:"containerUrl,omitempty"`
+ // StartTime - Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested.
+ StartTime *date.Time `json:"startTime,omitempty"`
+ // EndTime - Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime.
+ EndTime *date.Time `json:"endTime,omitempty"`
+}
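+
+// exampleUploadLogsConfiguration is an illustrative sketch (not generated
+// code): the container URL must carry a SAS with write permissions, and the
+// URL below is a placeholder. EndTime is omitted so all logs after startTime
+// are included.
+func exampleUploadLogsConfiguration(startTime date.Time) UploadBatchServiceLogsConfiguration {
+	containerURL := "https://example.blob.core.windows.net/logs?sv=...&sig=..."
+	return UploadBatchServiceLogsConfiguration{
+		ContainerURL: &containerURL,
+		StartTime:    &startTime,
+	}
+}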
+
+// UploadBatchServiceLogsResult ...
+type UploadBatchServiceLogsResult struct {
+ autorest.Response `json:"-"`
+ // VirtualDirectoryName - The virtual directory name is part of the blob name for each log file uploaded, and it is built based on the poolId, nodeId and a unique identifier.
+ VirtualDirectoryName *string `json:"virtualDirectoryName,omitempty"`
+ NumberOfFilesUploaded *int32 `json:"numberOfFilesUploaded,omitempty"`
+}
+
+// UsageStatistics ...
+type UsageStatistics struct {
+ StartTime *date.Time `json:"startTime,omitempty"`
+ LastUpdateTime *date.Time `json:"lastUpdateTime,omitempty"`
+ DedicatedCoreTime *string `json:"dedicatedCoreTime,omitempty"`
+}
+
+// UserAccount ...
+type UserAccount struct {
+ Name *string `json:"name,omitempty"`
+ Password *string `json:"password,omitempty"`
+ // ElevationLevel - The default value is nonAdmin. Possible values include: 'NonAdmin', 'Admin'
+ ElevationLevel ElevationLevel `json:"elevationLevel,omitempty"`
+ // LinuxUserConfiguration - This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options.
+ LinuxUserConfiguration *LinuxUserConfiguration `json:"linuxUserConfiguration,omitempty"`
+ // WindowsUserConfiguration - This property can only be specified if the user is on a Windows Pool. If not specified and on a Windows Pool, the user is created with the default options.
+ WindowsUserConfiguration *WindowsUserConfiguration `json:"windowsUserConfiguration,omitempty"`
+}
+
+// UserIdentity specify either the userName or autoUser property, but not both.
+type UserIdentity struct {
+ // UserName - The userName and autoUser properties are mutually exclusive; you must specify one but not both.
+ UserName *string `json:"username,omitempty"`
+ // AutoUser - The userName and autoUser properties are mutually exclusive; you must specify one but not both.
+ AutoUser *AutoUserSpecification `json:"autoUser,omitempty"`
+}
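+
+// exampleUserIdentity is an illustrative sketch (not generated code): because
+// userName and autoUser are mutually exclusive, only UserName is populated
+// here; the account name is a placeholder.
+func exampleUserIdentity() UserIdentity {
+	name := "taskuser"
+	return UserIdentity{UserName: &name}
+}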
+
+// VirtualMachineConfiguration ...
+type VirtualMachineConfiguration struct {
+ ImageReference *ImageReference `json:"imageReference,omitempty"`
+ // NodeAgentSKUID - The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation.
+ NodeAgentSKUID *string `json:"nodeAgentSKUId,omitempty"`
+ // WindowsConfiguration - This property must not be specified if the imageReference property specifies a Linux OS Image.
+ WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"`
+ // DataDisks - This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine.
+ DataDisks *[]DataDisk `json:"dataDisks,omitempty"`
+ // LicenseType - This only applies to Images that contain the Windows operating system, and should only be used when you hold valid on-premises licenses for the Compute Nodes which will be deployed. If omitted, no on-premises licensing discount is applied. Values are:
+ // Windows_Server - The on-premises license is for Windows Server.
+ // Windows_Client - The on-premises license is for Windows Client.
+ LicenseType *string `json:"licenseType,omitempty"`
+ // ContainerConfiguration - If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it.
+ ContainerConfiguration *ContainerConfiguration `json:"containerConfiguration,omitempty"`
+}
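+
+// exampleVirtualMachineConfiguration is an illustrative sketch (not generated
+// code): the node agent SKU must match the chosen Image reference, and the
+// publisher/offer/sku and node agent SKU values below are placeholders assumed
+// for the sake of the example.
+func exampleVirtualMachineConfiguration() VirtualMachineConfiguration {
+	publisher, offer, sku := "Canonical", "UbuntuServer", "18.04-LTS"
+	nodeAgent := "batch.node.ubuntu 18.04"
+	return VirtualMachineConfiguration{
+		ImageReference: &ImageReference{Publisher: &publisher, Offer: &offer, Sku: &sku},
+		NodeAgentSKUID: &nodeAgent,
+	}
+}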
+
+// WindowsConfiguration ...
+type WindowsConfiguration struct {
+ // EnableAutomaticUpdates - If omitted, the default value is true.
+ EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"`
+}
+
+// WindowsUserConfiguration ...
+type WindowsUserConfiguration struct {
+ // LoginMode - The default value for VirtualMachineConfiguration Pools is 'batch' and for CloudServiceConfiguration Pools is 'interactive'. Possible values include: 'Batch', 'Interactive'
+ LoginMode LoginMode `json:"loginMode,omitempty"`
+}
diff --git a/services/batch/2019-08-01.10.0/batch/pool.go b/services/batch/2019-08-01.10.0/batch/pool.go
new file mode 100644
index 000000000000..e5e2d67af908
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/pool.go
@@ -0,0 +1,2054 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "github.com/satori/go.uuid"
+ "net/http"
+)
+
+// PoolClient is a client for issuing REST requests to the Azure Batch service.
+type PoolClient struct {
+ BaseClient
+}
+
+// NewPoolClient creates an instance of the PoolClient client.
+func NewPoolClient(batchURL string) PoolClient {
+ return PoolClient{New(batchURL)}
+}
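+
+// examplePoolClient is an illustrative sketch (not generated code): the client
+// is constructed from the Batch account endpoint URL, which is a placeholder
+// here; credentials (an authorizer on the embedded autorest client) still need
+// to be configured before issuing requests, which is outside this sketch.
+func examplePoolClient() PoolClient {
+	return NewPoolClient("https://myaccount.westus.batch.azure.com")
+}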
+
+// Add when naming Pools, avoid including sensitive information such as user names or secret project names. This
+// information may appear in telemetry logs accessible to Microsoft Support engineers.
+// Parameters:
+// pool - the Pool to be added.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client PoolClient) Add(ctx context.Context, pool PoolAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Add")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: pool,
+ Constraints: []validation.Constraint{{Target: "pool.ID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "pool.VMSize", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "pool.CloudServiceConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "pool.CloudServiceConfiguration.OsFamily", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "pool.VirtualMachineConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "pool.VirtualMachineConfiguration.ImageReference", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "pool.VirtualMachineConfiguration.NodeAgentSKUID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "pool.VirtualMachineConfiguration.ContainerConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "pool.VirtualMachineConfiguration.ContainerConfiguration.Type", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "pool.NetworkConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "pool.NetworkConfiguration.EndpointConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "pool.NetworkConfiguration.EndpointConfiguration.InboundNATPools", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "pool.StartTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "pool.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "pool.StartTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "pool.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "pool.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "pool.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "pool.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "Add", err.Error())
+ }
+
+ req, err := client.AddPreparer(ctx, pool, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Add", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.AddSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Add", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.AddResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Add", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// AddPreparer prepares the Add request.
+func (client PoolClient) AddPreparer(ctx context.Context, pool PoolAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/pools"),
+ autorest.WithJSON(pool),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AddSender sends the Add request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) AddSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// AddResponder handles the response to the Add request. The method always
+// closes the http.Response Body.
+func (client PoolClient) AddResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
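+
+// exampleAddPool is an illustrative sketch (not generated code) of calling Add
+// with the minimum fields checked by the validation above (ID and VMSize); the
+// pool ID and VM size are placeholders, and nil is passed for the optional
+// timeout, client-request-id, return-client-request-id and ocp-date arguments.
+func exampleAddPool(ctx context.Context, client PoolClient) error {
+	id, vmSize := "example-pool", "standard_d2s_v3"
+	pool := PoolAddParameter{ID: &id, VMSize: &vmSize}
+	_, err := client.Add(ctx, pool, nil, nil, nil, nil)
+	return err
+}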
+
+// Delete when you request that a Pool be deleted, the following actions occur: the Pool state is set to deleting; any
+// ongoing resize operations on the Pool are stopped; the Batch service starts resizing the Pool to zero Compute Nodes;
+// any Tasks running on existing Compute Nodes are terminated and requeued (as if a resize Pool operation had been
+// requested with the default requeue option); finally, the Pool is removed from the system. Because running Tasks are
+// requeued, the user can rerun these Tasks by updating their Job to target a different Pool. The Tasks can then run on
+// the new Pool. If you want to override the requeue behavior, then you should call resize Pool explicitly to shrink
+// the Pool to zero size before deleting the Pool. If you call an Update, Patch or Delete API on a Pool in the deleting
+// state, it will fail with HTTP status code 409 with error code PoolBeingDeleted.
+// Parameters:
+// poolID - the ID of the Pool to delete.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client PoolClient) Delete(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, poolID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PoolClient) DeletePreparer(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PoolClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
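+
+// exampleDeletePool is an illustrative sketch (not generated code) of a
+// conditional delete: an ETag obtained earlier is passed as If-Match so the
+// delete only succeeds if the Pool has not changed since; the pool ID is a
+// placeholder and the remaining optional arguments are left nil or empty.
+func exampleDeletePool(ctx context.Context, client PoolClient, etag string) error {
+	_, err := client.Delete(ctx, "example-pool", nil, nil, nil, nil, etag, "", nil, nil)
+	return err
+}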
+
+// DisableAutoScale sends the disable auto scale request.
+// Parameters:
+// poolID - the ID of the Pool on which to disable automatic scaling.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client PoolClient) DisableAutoScale(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.DisableAutoScale")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DisableAutoScalePreparer(ctx, poolID, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "DisableAutoScale", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DisableAutoScaleSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "DisableAutoScale", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DisableAutoScaleResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "DisableAutoScale", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DisableAutoScalePreparer prepares the DisableAutoScale request.
+func (client PoolClient) DisableAutoScalePreparer(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/disableautoscale", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DisableAutoScaleSender sends the DisableAutoScale request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) DisableAutoScaleSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DisableAutoScaleResponder handles the response to the DisableAutoScale request. The method always
+// closes the http.Response Body.
+func (client PoolClient) DisableAutoScaleResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
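+
+// exampleDisableAutoScale is an illustrative sketch (not generated code): only
+// the pool ID (a placeholder here) is required, with the optional arguments
+// left nil.
+func exampleDisableAutoScale(ctx context.Context, client PoolClient) error {
+	_, err := client.DisableAutoScale(ctx, "example-pool", nil, nil, nil, nil)
+	return err
+}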
+
+// EnableAutoScale you cannot enable automatic scaling on a Pool if a resize operation is in progress on the Pool. If
+// automatic scaling of the Pool is currently disabled, you must specify a valid autoscale formula as part of the
+// request. If automatic scaling of the Pool is already enabled, you may specify a new autoscale formula and/or a new
+// evaluation interval. You cannot call this API for the same Pool more than once every 30 seconds.
+// Parameters:
+// poolID - the ID of the Pool on which to enable automatic scaling.
+// poolEnableAutoScaleParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client PoolClient) EnableAutoScale(ctx context.Context, poolID string, poolEnableAutoScaleParameter PoolEnableAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.EnableAutoScale")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.EnableAutoScalePreparer(ctx, poolID, poolEnableAutoScaleParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "EnableAutoScale", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.EnableAutoScaleSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "EnableAutoScale", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.EnableAutoScaleResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "EnableAutoScale", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// EnableAutoScalePreparer prepares the EnableAutoScale request.
+func (client PoolClient) EnableAutoScalePreparer(ctx context.Context, poolID string, poolEnableAutoScaleParameter PoolEnableAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/enableautoscale", pathParameters),
+ autorest.WithJSON(poolEnableAutoScaleParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// EnableAutoScaleSender sends the EnableAutoScale request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) EnableAutoScaleSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// EnableAutoScaleResponder handles the response to the EnableAutoScale request. The method always
+// closes the http.Response Body.
+func (client PoolClient) EnableAutoScaleResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
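+
+// exampleEnableAutoScale is an illustrative sketch (not generated code): the
+// formula keeps the Pool at five dedicated nodes and is assumed to be a valid
+// autoscale expression; AutoScaleFormula is assumed to be the relevant field
+// on PoolEnableAutoScaleParameter, and the pool ID is a placeholder.
+func exampleEnableAutoScale(ctx context.Context, client PoolClient) error {
+	formula := "$TargetDedicatedNodes = 5;"
+	params := PoolEnableAutoScaleParameter{AutoScaleFormula: &formula}
+	_, err := client.EnableAutoScale(ctx, "example-pool", params, nil, nil, nil, nil, "", "", nil, nil)
+	return err
+}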
+
+// EvaluateAutoScale this API is primarily for validating an autoscale formula, as it simply returns the result without
+// applying the formula to the Pool. The Pool must have auto scaling enabled in order to evaluate a formula.
+// Parameters:
+// poolID - the ID of the Pool on which to evaluate the automatic scaling formula.
+// poolEvaluateAutoScaleParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client PoolClient) EvaluateAutoScale(ctx context.Context, poolID string, poolEvaluateAutoScaleParameter PoolEvaluateAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result AutoScaleRun, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.EvaluateAutoScale")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: poolEvaluateAutoScaleParameter,
+ Constraints: []validation.Constraint{{Target: "poolEvaluateAutoScaleParameter.AutoScaleFormula", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "EvaluateAutoScale", err.Error())
+ }
+
+ req, err := client.EvaluateAutoScalePreparer(ctx, poolID, poolEvaluateAutoScaleParameter, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "EvaluateAutoScale", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.EvaluateAutoScaleSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "EvaluateAutoScale", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.EvaluateAutoScaleResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "EvaluateAutoScale", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// EvaluateAutoScalePreparer prepares the EvaluateAutoScale request.
+func (client PoolClient) EvaluateAutoScalePreparer(ctx context.Context, poolID string, poolEvaluateAutoScaleParameter PoolEvaluateAutoScaleParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/evaluateautoscale", pathParameters),
+ autorest.WithJSON(poolEvaluateAutoScaleParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// EvaluateAutoScaleSender sends the EvaluateAutoScale request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) EvaluateAutoScaleSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// EvaluateAutoScaleResponder handles the response to the EvaluateAutoScale request. The method always
+// closes the http.Response Body.
+func (client PoolClient) EvaluateAutoScaleResponder(resp *http.Response) (result AutoScaleRun, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
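+
+// exampleEvaluateAutoScale is an illustrative sketch (not generated code): the
+// formula is evaluated and the result returned without being applied to the
+// Pool, which is what makes this useful for validating a formula first; the
+// pool ID and formula are placeholders.
+func exampleEvaluateAutoScale(ctx context.Context, client PoolClient) (AutoScaleRun, error) {
+	formula := "$TargetDedicatedNodes = 2;"
+	params := PoolEvaluateAutoScaleParameter{AutoScaleFormula: &formula}
+	return client.EvaluateAutoScale(ctx, "example-pool", params, nil, nil, nil, nil)
+}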
+
+// Exists gets basic properties of a Pool.
+// Parameters:
+// poolID - the ID of the Pool to get.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client PoolClient) Exists(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Exists")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ExistsPreparer(ctx, poolID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Exists", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ExistsSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Exists", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ExistsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Exists", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ExistsPreparer prepares the Exists request.
+func (client PoolClient) ExistsPreparer(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsHead(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ExistsSender sends the Exists request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) ExistsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ExistsResponder handles the response to the Exists request. The method always
+// closes the http.Response Body.
+func (client PoolClient) ExistsResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
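+
+// examplePoolExists is an illustrative sketch (not generated code): because
+// ExistsResponder treats both 200 and 404 as non-error responses, the status
+// code distinguishes "pool exists" from "pool not found"; the pool ID is a
+// placeholder and optional arguments are left nil or empty.
+func examplePoolExists(ctx context.Context, client PoolClient) (bool, error) {
+	resp, err := client.Exists(ctx, "example-pool", nil, nil, nil, nil, "", "", nil, nil)
+	if err != nil {
+		return false, err
+	}
+	return resp.StatusCode == http.StatusOK, nil
+}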
+
+// Get gets information about the specified Pool.
+// Parameters:
+// poolID - the ID of the Pool to get.
+// selectParameter - an OData $select clause.
+// expand - an OData $expand clause.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client PoolClient) Get(ctx context.Context, poolID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result CloudPool, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, poolID, selectParameter, expand, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client PoolClient) GetPreparer(ctx context.Context, poolID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PoolClient) GetResponder(resp *http.Response) (result CloudPool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
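+
+// Usage sketch (illustrative, not generated code): given a PoolClient that has
+// been constructed and authorized elsewhere and a caller-supplied ctx, a Get
+// call with every optional parameter left at its default (nil pointers, empty
+// strings) looks like the following; "examplepool" is a placeholder Pool ID.
+//
+//	pool, err := client.Get(ctx, "examplepool", "", "", nil, nil, nil, nil, "", "", nil, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = pool // CloudPool describing the service-side state of the Pool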
+
+// GetAllLifetimeStatistics statistics are aggregated across all Pools that have ever existed in the Account, from
+// Account creation to the last update time of the statistics. The statistics may not be immediately available. The
+// Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.
+// Parameters:
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client PoolClient) GetAllLifetimeStatistics(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result PoolStatistics, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.GetAllLifetimeStatistics")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAllLifetimeStatisticsPreparer(ctx, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "GetAllLifetimeStatistics", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAllLifetimeStatisticsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "GetAllLifetimeStatistics", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAllLifetimeStatisticsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "GetAllLifetimeStatistics", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAllLifetimeStatisticsPreparer prepares the GetAllLifetimeStatistics request.
+func (client PoolClient) GetAllLifetimeStatisticsPreparer(ctx context.Context, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/lifetimepoolstats"),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAllLifetimeStatisticsSender sends the GetAllLifetimeStatistics request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) GetAllLifetimeStatisticsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAllLifetimeStatisticsResponder handles the response to the GetAllLifetimeStatistics request. The method always
+// closes the http.Response Body.
+func (client PoolClient) GetAllLifetimeStatisticsResponder(resp *http.Response) (result PoolStatistics, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
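+
+// Usage sketch (illustrative, not generated code): lifetime statistics are
+// rolled up periodically (typically with a delay of about 30 minutes, as noted
+// above), so callers should expect slightly stale data. With the optional
+// parameters left at their defaults:
+//
+//	stats, err := client.GetAllLifetimeStatistics(ctx, nil, nil, nil, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = stats // PoolStatistics aggregated across all Pools ever created in the Account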
+
+// List sends the list request.
+// Parameters:
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools.
+// selectParameter - an OData $select clause.
+// expand - an OData $expand clause.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 Pools can be returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client PoolClient) List(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudPoolListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.List")
+ defer func() {
+ sc := -1
+ if result.cplr.Response.Response != nil {
+ sc = result.cplr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.cplr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.cplr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client PoolClient) ListPreparer(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/pools"),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client PoolClient) ListResponder(resp *http.Response) (result CloudPoolListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client PoolClient) listNextResults(ctx context.Context, lastResults CloudPoolListResult) (result CloudPoolListResult, err error) {
+ req, err := lastResults.cloudPoolListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.PoolClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.PoolClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client PoolClient) ListComplete(ctx context.Context, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudPoolListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
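+
+// Usage sketch (illustrative, not generated code): List returns one page of
+// results, while ListComplete wraps the same call in an iterator that crosses
+// page boundaries automatically. The paging helpers referenced below (NotDone,
+// Values, NextWithContext) are assumed to be the methods generated for
+// CloudPoolListResultPage in models.go; they are not defined in this file.
+//
+//	page, err := client.List(ctx, "", "", "", nil, nil, nil, nil, nil)
+//	for err == nil && page.NotDone() {
+//		for _, p := range page.Values() {
+//			_ = p // process each CloudPool
+//		}
+//		err = page.NextWithContext(ctx)
+//	}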
+
+// ListUsageMetrics if you do not specify a $filter clause including a poolId, the response includes all Pools that
+// existed in the Account in the time range of the returned aggregation intervals. If you do not specify a $filter
+// clause including a startTime or endTime, these filters default to the start and end times of the last aggregation
+// interval currently available; that is, only the last aggregation interval is returned.
+// Parameters:
+// startTime - the earliest time from which to include metrics. This must be at least two and a half hours
+// before the current time. If not specified, this defaults to the start time of the last aggregation interval
+// currently available.
+// endTime - the latest time from which to include metrics. This must be at least two hours before the current
+// time. If not specified, this defaults to the end time of the last aggregation interval currently available.
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 results will be
+// returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client PoolClient) ListUsageMetrics(ctx context.Context, startTime *date.Time, endTime *date.Time, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result PoolListUsageMetricsResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.ListUsageMetrics")
+ defer func() {
+ sc := -1
+ if result.plumr.Response.Response != nil {
+ sc = result.plumr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "ListUsageMetrics", err.Error())
+ }
+
+ result.fn = client.listUsageMetricsNextResults
+ req, err := client.ListUsageMetricsPreparer(ctx, startTime, endTime, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "ListUsageMetrics", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListUsageMetricsSender(req)
+ if err != nil {
+ result.plumr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "ListUsageMetrics", resp, "Failure sending request")
+ return
+ }
+
+ result.plumr, err = client.ListUsageMetricsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "ListUsageMetrics", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListUsageMetricsPreparer prepares the ListUsageMetrics request.
+func (client PoolClient) ListUsageMetricsPreparer(ctx context.Context, startTime *date.Time, endTime *date.Time, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if startTime != nil {
+ queryParameters["starttime"] = autorest.Encode("query", *startTime)
+ }
+ if endTime != nil {
+ queryParameters["endtime"] = autorest.Encode("query", *endTime)
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPath("/poolusagemetrics"),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListUsageMetricsSender sends the ListUsageMetrics request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) ListUsageMetricsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListUsageMetricsResponder handles the response to the ListUsageMetrics request. The method always
+// closes the http.Response Body.
+func (client PoolClient) ListUsageMetricsResponder(resp *http.Response) (result PoolListUsageMetricsResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listUsageMetricsNextResults retrieves the next set of results, if any.
+func (client PoolClient) listUsageMetricsNextResults(ctx context.Context, lastResults PoolListUsageMetricsResult) (result PoolListUsageMetricsResult, err error) {
+ req, err := lastResults.poolListUsageMetricsResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.PoolClient", "listUsageMetricsNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListUsageMetricsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.PoolClient", "listUsageMetricsNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListUsageMetricsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "listUsageMetricsNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListUsageMetricsComplete enumerates all values, automatically crossing page boundaries as required.
+func (client PoolClient) ListUsageMetricsComplete(ctx context.Context, startTime *date.Time, endTime *date.Time, filter string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result PoolListUsageMetricsResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.ListUsageMetrics")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListUsageMetrics(ctx, startTime, endTime, filter, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
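+
+// Usage sketch (illustrative, not generated code): with startTime and endTime
+// left nil the service falls back to the last available aggregation interval,
+// as described above. The $filter string below is only an example of the OData
+// syntax linked in the parameter docs, and the pager methods (NotDone,
+// NextWithContext) are assumed from the generated PoolListUsageMetricsResultPage
+// in models.go.
+//
+//	page, err := client.ListUsageMetrics(ctx, nil, nil, "poolId eq 'examplepool'", nil, nil, nil, nil, nil)
+//	for err == nil && page.NotDone() {
+//		// consume the current page of usage metrics here
+//		err = page.NextWithContext(ctx)
+//	}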
+
+// Patch this only replaces the Pool properties specified in the request. For example, if the Pool has a StartTask
+// associated with it, and a request does not specify a StartTask element, then the Pool keeps the existing StartTask.
+// Parameters:
+// poolID - the ID of the Pool to update.
+// poolPatchParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client PoolClient) Patch(ctx context.Context, poolID string, poolPatchParameter PoolPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Patch")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.PatchPreparer(ctx, poolID, poolPatchParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Patch", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.PatchSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Patch", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.PatchResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Patch", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// PatchPreparer prepares the Patch request.
+func (client PoolClient) PatchPreparer(ctx context.Context, poolID string, poolPatchParameter PoolPatchParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}", pathParameters),
+ autorest.WithJSON(poolPatchParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PatchSender sends the Patch request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) PatchSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// PatchResponder handles the response to the Patch request. The method always
+// closes the http.Response Body.
+func (client PoolClient) PatchResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
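+
+// Usage sketch (illustrative, not generated code): because Patch only replaces
+// the properties present in the request body, a PoolPatchParameter that sets a
+// single element (for example a replacement StartTask) leaves every other Pool
+// property untouched. The model's fields are defined in models.go, not here.
+//
+//	patch := PoolPatchParameter{ /* set only the properties to change, e.g. StartTask */ }
+//	_, err := client.Patch(ctx, "examplepool", patch, nil, nil, nil, nil, "", "", nil, nil)
+//	if err != nil {
+//		// handle the error
+//	}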
+
+// RemoveNodes this operation can only run when the allocation state of the Pool is steady. When this operation runs,
+// the allocation state changes from steady to resizing.
+// Parameters:
+// poolID - the ID of the Pool from which you want to remove Compute Nodes.
+// nodeRemoveParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client PoolClient) RemoveNodes(ctx context.Context, poolID string, nodeRemoveParameter NodeRemoveParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.RemoveNodes")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: nodeRemoveParameter,
+ Constraints: []validation.Constraint{{Target: "nodeRemoveParameter.NodeList", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "nodeRemoveParameter.NodeList", Name: validation.MaxItems, Rule: 100, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "RemoveNodes", err.Error())
+ }
+
+ req, err := client.RemoveNodesPreparer(ctx, poolID, nodeRemoveParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "RemoveNodes", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RemoveNodesSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "RemoveNodes", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RemoveNodesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "RemoveNodes", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// RemoveNodesPreparer prepares the RemoveNodes request.
+func (client PoolClient) RemoveNodesPreparer(ctx context.Context, poolID string, nodeRemoveParameter NodeRemoveParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/removenodes", pathParameters),
+ autorest.WithJSON(nodeRemoveParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RemoveNodesSender sends the RemoveNodes request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) RemoveNodesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// RemoveNodesResponder handles the response to the RemoveNodes request. The method always
+// closes the http.Response Body.
+func (client PoolClient) RemoveNodesResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
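+
+// Usage sketch (illustrative, not generated code): the validation above
+// requires NodeRemoveParameter.NodeList to be non-nil and to contain at most
+// 100 entries. NodeList is assumed here to be a *[]string of Compute Node IDs,
+// per the generated model in models.go; the node ID shown is a placeholder.
+//
+//	nodes := []string{"tvm-1234567890_1-20190830t000000z"}
+//	_, err := client.RemoveNodes(ctx, "examplepool", NodeRemoveParameter{NodeList: &nodes}, nil, nil, nil, nil, "", "", nil, nil)
+//	if err != nil {
+//		// handle the error; the Pool must be in the steady allocation state
+//	}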
+
+// Resize you can only resize a Pool when its allocation state is steady. If the Pool is already resizing, the request
+// fails with status code 409. When you resize a Pool, the Pool's allocation state changes from steady to resizing. You
+// cannot resize Pools that are configured for automatic scaling; if you try, the Batch service returns status code
+// 409. If you resize a Pool downwards, the Batch service chooses which Compute Nodes to remove. To remove
+// specific Compute Nodes, use the Pool remove Compute Nodes API instead.
+// Parameters:
+// poolID - the ID of the Pool to resize.
+// poolResizeParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client PoolClient) Resize(ctx context.Context, poolID string, poolResizeParameter PoolResizeParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Resize")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ResizePreparer(ctx, poolID, poolResizeParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Resize", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ResizeSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Resize", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ResizeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Resize", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ResizePreparer prepares the Resize request.
+func (client PoolClient) ResizePreparer(ctx context.Context, poolID string, poolResizeParameter PoolResizeParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/resize", pathParameters),
+ autorest.WithJSON(poolResizeParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ResizeSender sends the Resize request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) ResizeSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ResizeResponder handles the response to the Resize request. The method always
+// closes the http.Response Body.
+func (client PoolClient) ResizeResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
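+
+// Usage sketch (illustrative, not generated code): Resize is rejected with a
+// 409 unless the Pool's allocation state is steady, so callers often check the
+// Pool (for example via Get) before issuing the request. The
+// TargetDedicatedNodes field is assumed from the PoolResizeParameter model in
+// models.go; it is not defined in this file.
+//
+//	target := int32(5)
+//	_, err := client.Resize(ctx, "examplepool", PoolResizeParameter{TargetDedicatedNodes: &target}, nil, nil, nil, nil, "", "", nil, nil)
+//	if err != nil {
+//		// handle the error (including the 409 returned while already resizing)
+//	}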
+
+// StopResize this does not restore the Pool to its previous state before the resize operation: it only stops any
+// further changes from being made, and the Pool maintains its current state. After stopping, the Pool stabilizes at
+// the number of Compute Nodes it had when the stop operation was issued. During the stop operation, the Pool allocation
+// state changes first to stopping and then to steady. A resize operation need not be an explicit resize Pool request;
+// this API can also be used to halt the initial sizing of the Pool when it is created.
+// Parameters:
+// poolID - the ID of the Pool whose resizing you want to stop.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client PoolClient) StopResize(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.StopResize")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.StopResizePreparer(ctx, poolID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "StopResize", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.StopResizeSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "StopResize", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.StopResizeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "StopResize", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// StopResizePreparer prepares the StopResize request.
+func (client PoolClient) StopResizePreparer(ctx context.Context, poolID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/stopresize", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// StopResizeSender sends the StopResize request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) StopResizeSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// StopResizeResponder handles the response to the StopResize request. The method always
+// closes the http.Response Body.
+func (client PoolClient) StopResizeResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
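+
+// Usage sketch (illustrative, not generated code): StopResize takes no request
+// body; it halts an in-flight resize (or the initial sizing of a newly created
+// Pool) and the Pool then stabilizes at its current number of Compute Nodes.
+//
+//	_, err := client.StopResize(ctx, "examplepool", nil, nil, nil, nil, "", "", nil, nil)
+//	if err != nil {
+//		// handle the error
+//	}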
+
+// UpdateProperties this fully replaces all the updatable properties of the Pool. For example, if the Pool has a
+// StartTask associated with it and StartTask is not specified with this request, then the Batch service will remove
+// the existing StartTask.
+// Parameters:
+// poolID - the ID of the Pool to update.
+// poolUpdatePropertiesParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client PoolClient) UpdateProperties(ctx context.Context, poolID string, poolUpdatePropertiesParameter PoolUpdatePropertiesParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.UpdateProperties")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: poolUpdatePropertiesParameter,
+ Constraints: []validation.Constraint{{Target: "poolUpdatePropertiesParameter.StartTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "poolUpdatePropertiesParameter.StartTask.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "poolUpdatePropertiesParameter.StartTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "poolUpdatePropertiesParameter.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "poolUpdatePropertiesParameter.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "poolUpdatePropertiesParameter.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "poolUpdatePropertiesParameter.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ {Target: "poolUpdatePropertiesParameter.CertificateReferences", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "poolUpdatePropertiesParameter.ApplicationPackageReferences", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "poolUpdatePropertiesParameter.Metadata", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "UpdateProperties", err.Error())
+ }
+
+ req, err := client.UpdatePropertiesPreparer(ctx, poolID, poolUpdatePropertiesParameter, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "UpdateProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdatePropertiesSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "UpdateProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdatePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "UpdateProperties", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePropertiesPreparer prepares the UpdateProperties request.
+func (client PoolClient) UpdatePropertiesPreparer(ctx context.Context, poolID string, poolUpdatePropertiesParameter PoolUpdatePropertiesParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "poolId": autorest.Encode("path", poolID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/pools/{poolId}/updateproperties", pathParameters),
+ autorest.WithJSON(poolUpdatePropertiesParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdatePropertiesSender sends the UpdateProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) UpdatePropertiesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdatePropertiesResponder handles the response to the UpdateProperties request. The method always
+// closes the http.Response Body.
+func (client PoolClient) UpdatePropertiesResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
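+
+// updatePoolProperties is an illustrative sketch, not generated code: it shows one way
+// to call UpdateProperties. Because this operation fully replaces the updatable
+// properties of the Pool, the validation above requires CertificateReferences,
+// ApplicationPackageReferences and Metadata to be non-nil; passing empty slices
+// clears them. The account URL and pool ID are hypothetical placeholders, and the
+// sketch assumes the package's NewPoolClient constructor.
+func updatePoolProperties(ctx context.Context, authorizer autorest.Authorizer) error {
+	client := NewPoolClient("https://myaccount.myregion.batch.azure.com") // hypothetical account URL
+	client.Authorizer = authorizer
+
+	certs := []CertificateReference{}       // empty: remove any certificate references
+	apps := []ApplicationPackageReference{} // empty: remove any application package references
+	meta := []MetadataItem{}                // empty: remove any metadata
+	params := PoolUpdatePropertiesParameter{
+		CertificateReferences:        &certs,
+		ApplicationPackageReferences: &apps,
+		Metadata:                     &meta,
+	}
+	// The trailing nils accept the defaults for timeout (30 seconds),
+	// clientRequestID, returnClientRequestID and ocpDate.
+	_, err := client.UpdateProperties(ctx, "mypool", params, nil, nil, nil, nil)
+	return err
+}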
diff --git a/services/batch/2019-08-01.10.0/batch/task.go b/services/batch/2019-08-01.10.0/batch/task.go
new file mode 100644
index 000000000000..9672a71fa130
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/task.go
@@ -0,0 +1,1258 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "github.com/satori/go.uuid"
+ "net/http"
+)
+
+// TaskClient is the client for issuing REST requests to the Azure Batch service.
+type TaskClient struct {
+ BaseClient
+}
+
+// NewTaskClient creates an instance of the TaskClient client.
+func NewTaskClient(batchURL string) TaskClient {
+ return TaskClient{New(batchURL)}
+}
+
+// Add the maximum lifetime of a Task from addition to completion is 180 days. If a Task has not completed within 180
+// days of being added it will be terminated by the Batch service and left in whatever state it was in at that time.
+// Parameters:
+// jobID - the ID of the Job to which the Task is to be added.
+// task - the Task to be added.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client TaskClient) Add(ctx context.Context, jobID string, task TaskAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Add")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: task,
+ Constraints: []validation.Constraint{{Target: "task.ID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "task.CommandLine", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "task.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "task.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "task.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "task.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "task.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ {Target: "task.AffinityInfo", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "task.AffinityInfo.AffinityID", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "task.MultiInstanceSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "task.MultiInstanceSettings.CoordinationCommandLine", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("batch.TaskClient", "Add", err.Error())
+ }
+
+ req, err := client.AddPreparer(ctx, jobID, task, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Add", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.AddSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Add", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.AddResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Add", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// AddPreparer prepares the Add request.
+func (client TaskClient) AddPreparer(ctx context.Context, jobID string, task TaskAddParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks", pathParameters),
+ autorest.WithJSON(task),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AddSender sends the Add request. The method will close the
+// http.Response Body if it receives an error.
+func (client TaskClient) AddSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// AddResponder handles the response to the Add request. The method always
+// closes the http.Response Body.
+func (client TaskClient) AddResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
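+
+// addSingleTask is an illustrative sketch, not generated code: it constructs a
+// TaskClient and submits one Task. As the validation above shows, ID and
+// CommandLine are the only required fields of TaskAddParameter. The account URL,
+// job ID, task ID and command line are hypothetical placeholders.
+func addSingleTask(ctx context.Context, authorizer autorest.Authorizer) error {
+	client := NewTaskClient("https://myaccount.myregion.batch.azure.com") // hypothetical account URL
+	client.Authorizer = authorizer
+
+	id := "task-1"                     // must be unique within the Job
+	cmd := "/bin/bash -c 'echo hello'" // the command line the Task runs
+	task := TaskAddParameter{ID: &id, CommandLine: &cmd}
+
+	// The trailing nils accept the defaults for timeout (30 seconds),
+	// clientRequestID, returnClientRequestID and ocpDate.
+	_, err := client.Add(ctx, "myjob", task, nil, nil, nil, nil)
+	return err
+}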
+
+// AddCollection note that each Task must have a unique ID. The Batch service may not return the results for each Task
+// in the same order the Tasks were submitted in this request. If the server times out or the connection is closed
+// during the request, the request may have been partially or fully processed, or not at all. In such cases, the user
+// should re-issue the request. Note that it is up to the user to correctly handle failures when re-issuing a request.
+// For example, you should use the same Task IDs during a retry so that if the prior operation succeeded, the retry
+// will not create extra Tasks unexpectedly. If the response contains any Tasks which failed to add, a client can retry
+// the request. In a retry, it is most efficient to resubmit only Tasks that failed to add, and to omit Tasks that were
+// successfully added on the first attempt. The maximum lifetime of a Task from addition to completion is 180 days. If
+// a Task has not completed within 180 days of being added it will be terminated by the Batch service and left in
+// whatever state it was in at that time.
+// Parameters:
+// jobID - the ID of the Job to which the Task collection is to be added.
+// taskCollection - the Tasks to be added.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client TaskClient) AddCollection(ctx context.Context, jobID string, taskCollection TaskAddCollectionParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result TaskAddCollectionResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.AddCollection")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: taskCollection,
+ Constraints: []validation.Constraint{{Target: "taskCollection.Value", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "taskCollection.Value", Name: validation.MaxItems, Rule: 100, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("batch.TaskClient", "AddCollection", err.Error())
+ }
+
+ req, err := client.AddCollectionPreparer(ctx, jobID, taskCollection, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "AddCollection", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.AddCollectionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "AddCollection", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.AddCollectionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "AddCollection", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// AddCollectionPreparer prepares the AddCollection request.
+func (client TaskClient) AddCollectionPreparer(ctx context.Context, jobID string, taskCollection TaskAddCollectionParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/addtaskcollection", pathParameters),
+ autorest.WithJSON(taskCollection),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AddCollectionSender sends the AddCollection request. The method will close the
+// http.Response Body if it receives an error.
+func (client TaskClient) AddCollectionSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// AddCollectionResponder handles the response to the AddCollection request. The method always
+// closes the http.Response Body.
+func (client TaskClient) AddCollectionResponder(resp *http.Response) (result TaskAddCollectionResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
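+
+// addTasksInChunks is an illustrative sketch, not generated code: it submits Tasks
+// through AddCollection in chunks of at most 100, the per-request limit enforced by
+// the validation above (Value is assumed to be a *[]TaskAddParameter, matching that
+// constraint). Task IDs are kept stable across retries, so re-issuing a failed
+// request cannot create duplicate Tasks; per-Task failures come back in the
+// TaskAddCollectionResult and can be resubmitted individually.
+func addTasksInChunks(ctx context.Context, client TaskClient, jobID string, tasks []TaskAddParameter) error {
+	for start := 0; start < len(tasks); start += 100 {
+		end := start + 100
+		if end > len(tasks) {
+			end = len(tasks)
+		}
+		chunk := tasks[start:end]
+		// Trailing nils accept the defaults for timeout, clientRequestID,
+		// returnClientRequestID and ocpDate.
+		if _, err := client.AddCollection(ctx, jobID, TaskAddCollectionParameter{Value: &chunk}, nil, nil, nil, nil); err != nil {
+			return err
+		}
+	}
+	return nil
+}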
+
+// Delete when a Task is deleted, all of the files in its directory on the Compute Node where it ran are also deleted
+// (regardless of the retention time). For multi-instance Tasks, the delete Task operation applies synchronously to the
+// primary task; subtasks and their files are then deleted asynchronously in the background.
+// Parameters:
+// jobID - the ID of the Job from which to delete the Task.
+// taskID - the ID of the Task to delete.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client TaskClient) Delete(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, jobID, taskID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client TaskClient) DeletePreparer(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ "taskId": autorest.Encode("path", taskID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client TaskClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client TaskClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get for multi-instance Tasks, information such as affinityId, executionInfo and nodeInfo refers to the primary Task.
+// Use the list subtasks API to retrieve information about subtasks.
+// Parameters:
+// jobID - the ID of the Job that contains the Task.
+// taskID - the ID of the Task to get information about.
+// selectParameter - an OData $select clause.
+// expand - an OData $expand clause.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client TaskClient) Get(ctx context.Context, jobID string, taskID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result CloudTask, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, jobID, taskID, selectParameter, expand, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client TaskClient) GetPreparer(ctx context.Context, jobID string, taskID string, selectParameter string, expand string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ "taskId": autorest.Encode("path", taskID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client TaskClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client TaskClient) GetResponder(resp *http.Response) (result CloudTask, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
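+
+// getTaskSummary is an illustrative sketch, not generated code: it fetches a single
+// Task, using an OData $select clause to limit the returned properties. The property
+// list and IDs are hypothetical; the empty strings and nils accept the defaults for
+// $expand, the conditional headers and the remaining optional parameters.
+func getTaskSummary(ctx context.Context, client TaskClient, jobID, taskID string) (CloudTask, error) {
+	return client.Get(ctx, jobID, taskID,
+		"id,state,executionInfo", // $select
+		"",                       // $expand
+		nil, nil, nil, nil,       // timeout, clientRequestID, returnClientRequestID, ocpDate
+		"", "", nil, nil)         // ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince
+}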
+
+// List for multi-instance Tasks, information such as affinityId, executionInfo and nodeInfo refers to the primary Task.
+// Use the list subtasks API to retrieve information about subtasks.
+// Parameters:
+// jobID - the ID of the Job.
+// filter - an OData $filter clause. For more information on constructing this filter, see
+// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks.
+// selectParameter - an OData $select clause.
+// expand - an OData $expand clause.
+// maxResults - the maximum number of items to return in the response. A maximum of 1000 Tasks can be returned.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client TaskClient) List(ctx context.Context, jobID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudTaskListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.List")
+ defer func() {
+ sc := -1
+ if result.ctlr.Response.Response != nil {
+ sc = result.ctlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: maxResults,
+ Constraints: []validation.Constraint{{Target: "maxResults", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "maxResults", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
+ {Target: "maxResults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.TaskClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, jobID, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.ctlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.ctlr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client TaskClient) ListPreparer(ctx context.Context, jobID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+ if maxResults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxResults)
+ } else {
+ queryParameters["maxresults"] = autorest.Encode("query", 1000)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client TaskClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client TaskClient) ListResponder(resp *http.Response) (result CloudTaskListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client TaskClient) listNextResults(ctx context.Context, lastResults CloudTaskListResult) (result CloudTaskListResult, err error) {
+ req, err := lastResults.cloudTaskListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.TaskClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.TaskClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client TaskClient) ListComplete(ctx context.Context, jobID string, filter string, selectParameter string, expand string, maxResults *int32, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudTaskListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, jobID, filter, selectParameter, expand, maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ return
+}
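+
+// listActiveTasks is an illustrative sketch, not generated code: it walks every page
+// of results through the ListComplete iterator, filtering server-side with an OData
+// $filter. The filter expression is a hypothetical example; NextWithContext fetches
+// the next page whenever the current one is exhausted.
+func listActiveTasks(ctx context.Context, client TaskClient, jobID string) ([]CloudTask, error) {
+	var active []CloudTask
+	it, err := client.ListComplete(ctx, jobID,
+		"state eq 'active'", // $filter
+		"", "",              // $select, $expand
+		nil, nil, nil, nil, nil) // maxResults, timeout, clientRequestID, returnClientRequestID, ocpDate
+	if err != nil {
+		return nil, err
+	}
+	for it.NotDone() {
+		active = append(active, it.Value())
+		if err := it.NextWithContext(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return active, nil
+}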
+
+// ListSubtasks if the Task is not a multi-instance Task then this returns an empty collection.
+// Parameters:
+// jobID - the ID of the Job.
+// taskID - the ID of the Task.
+// selectParameter - an OData $select clause.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+func (client TaskClient) ListSubtasks(ctx context.Context, jobID string, taskID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (result CloudTaskListSubtasksResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.ListSubtasks")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListSubtasksPreparer(ctx, jobID, taskID, selectParameter, timeout, clientRequestID, returnClientRequestID, ocpDate)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "ListSubtasks", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSubtasksSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "ListSubtasks", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListSubtasksResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "ListSubtasks", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListSubtasksPreparer prepares the ListSubtasks request.
+func (client TaskClient) ListSubtasksPreparer(ctx context.Context, jobID string, taskID string, selectParameter string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ "taskId": autorest.Encode("path", taskID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/subtasksinfo", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSubtasksSender sends the ListSubtasks request. The method will close the
+// http.Response Body if it receives an error.
+func (client TaskClient) ListSubtasksSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListSubtasksResponder handles the response to the ListSubtasks request. The method always
+// closes the http.Response Body.
+func (client TaskClient) ListSubtasksResponder(resp *http.Response) (result CloudTaskListSubtasksResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Reactivate reactivation makes a Task eligible to be retried again up to its maximum retry count. The Task's state is
+// changed to active. As the Task is no longer in the completed state, any previous exit code or failure information is
+// no longer available after reactivation. Each time a Task is reactivated, its retry count is reset to 0. Reactivation
+// will fail for Tasks that are not completed or that previously completed successfully (with an exit code of 0).
+// Additionally, it will fail if the Job has completed (or is terminating or deleting).
+// Parameters:
+// jobID - the ID of the Job containing the Task.
+// taskID - the ID of the Task to reactivate.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client TaskClient) Reactivate(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Reactivate")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ReactivatePreparer(ctx, jobID, taskID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Reactivate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ReactivateSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Reactivate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ReactivateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Reactivate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ReactivatePreparer prepares the Reactivate request.
+func (client TaskClient) ReactivatePreparer(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ "taskId": autorest.Encode("path", taskID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/reactivate", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ReactivateSender sends the Reactivate request. The method will close the
+// http.Response Body if it receives an error.
+func (client TaskClient) ReactivateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ReactivateResponder handles the response to the Reactivate request. The method always
+// closes the http.Response Body.
+func (client TaskClient) ReactivateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Terminate when the Task has been terminated, it moves to the completed state. For multi-instance Tasks, the
+// terminate Task operation applies synchronously to the primary task; subtasks are then terminated asynchronously in
+// the background.
+// Parameters:
+// jobID - the ID of the Job containing the Task.
+// taskID - the ID of the Task to terminate.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client TaskClient) Terminate(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Terminate")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.TerminatePreparer(ctx, jobID, taskID, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Terminate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.TerminateSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Terminate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.TerminateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Terminate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// TerminatePreparer prepares the Terminate request.
+func (client TaskClient) TerminatePreparer(ctx context.Context, jobID string, taskID string, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ "taskId": autorest.Encode("path", taskID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}/terminate", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// TerminateSender sends the Terminate request. The method will close the
+// http.Response Body if it receives an error.
+func (client TaskClient) TerminateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// TerminateResponder handles the response to the Terminate request. The method always
+// closes the http.Response Body.
+func (client TaskClient) TerminateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
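+
+// terminateIfUnchanged is an illustrative sketch, not generated code: it passes a
+// previously captured ETag (for example from an earlier Get) as the ifMatch
+// parameter, so the Task is terminated only if it has not been modified since the
+// ETag was read. All other optional parameters are left at their defaults.
+func terminateIfUnchanged(ctx context.Context, client TaskClient, jobID, taskID, etag string) error {
+	_, err := client.Terminate(ctx, jobID, taskID,
+		nil, nil, nil, nil, // timeout, clientRequestID, returnClientRequestID, ocpDate
+		etag,               // ifMatch: only act if the ETag still matches
+		"", nil, nil)       // ifNoneMatch, ifModifiedSince, ifUnmodifiedSince
+	return err
+}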
+
+// Update updates the properties of the specified Task.
+// Parameters:
+// jobID - the ID of the Job containing the Task.
+// taskID - the ID of the Task to update.
+// taskUpdateParameter - the parameters for the request.
+// timeout - the maximum time that the server can spend processing the request, in seconds. The default is 30
+// seconds.
+// clientRequestID - the caller-generated request identity, in the form of a GUID with no decoration such as
+// curly braces, e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
+// returnClientRequestID - whether the server should return the client-request-id in the response.
+// ocpDate - the time the request was issued. Client libraries typically set this to the current system clock
+// time; set it explicitly if you are calling the REST API directly.
+// ifMatch - an ETag value associated with the version of the resource known to the client. The operation will
+// be performed only if the resource's current ETag on the service exactly matches the value specified by the
+// client.
+// ifNoneMatch - an ETag value associated with the version of the resource known to the client. The operation
+// will be performed only if the resource's current ETag on the service does not match the value specified by
+// the client.
+// ifModifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has been modified since the specified time.
+// ifUnmodifiedSince - a timestamp indicating the last modified time of the resource known to the client. The
+// operation will be performed only if the resource on the service has not been modified since the specified
+// time.
+func (client TaskClient) Update(ctx context.Context, jobID string, taskID string, taskUpdateParameter TaskUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TaskClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdatePreparer(ctx, jobID, taskID, taskUpdateParameter, timeout, clientRequestID, returnClientRequestID, ocpDate, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.TaskClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client TaskClient) UpdatePreparer(ctx context.Context, jobID string, taskID string, taskUpdateParameter TaskUpdateParameter, timeout *int32, clientRequestID *uuid.UUID, returnClientRequestID *bool, ocpDate *date.TimeRFC1123, ifMatch string, ifNoneMatch string, ifModifiedSince *date.TimeRFC1123, ifUnmodifiedSince *date.TimeRFC1123) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "batchUrl": client.BatchURL,
+ }
+
+ pathParameters := map[string]interface{}{
+ "jobId": autorest.Encode("path", jobID),
+ "taskId": autorest.Encode("path", taskID),
+ }
+
+ const APIVersion = "2019-08-01.10.0"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if timeout != nil {
+ queryParameters["timeout"] = autorest.Encode("query", *timeout)
+ } else {
+ queryParameters["timeout"] = autorest.Encode("query", 30)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; odata=minimalmetadata; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithCustomBaseURL("{batchUrl}", urlParameters),
+ autorest.WithPathParameters("/jobs/{jobId}/tasks/{taskId}", pathParameters),
+ autorest.WithJSON(taskUpdateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ if clientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("client-request-id", autorest.String(clientRequestID)))
+ }
+ if returnClientRequestID != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(returnClientRequestID)))
+ } else {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("return-client-request-id", autorest.String(false)))
+ }
+ if ocpDate != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("ocp-date", autorest.String(ocpDate)))
+ }
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ if ifModifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Modified-Since", autorest.String(ifModifiedSince)))
+ }
+ if ifUnmodifiedSince != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Unmodified-Since", autorest.String(ifUnmodifiedSince)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client TaskClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client TaskClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
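
The Update operation above follows the same pattern as the other Task operations: optional arguments map to nil pointers or empty strings, and omitted preconditions simply leave the corresponding headers off the request. Below is a minimal usage sketch under stated assumptions: the import path follows the new directory services/batch/2019-08-01.10.0/batch, NewTaskClient(batchURL) is the generated constructor (not shown in this hunk), and an autorest.Authorizer valid for the Batch data plane is available.

package sample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
	"github.com/Azure/go-autorest/autorest"
)

// updateTask is a sketch only: the constructor name and the contents of
// TaskUpdateParameter are assumptions, not taken from this diff.
func updateTask(ctx context.Context, batchURL string, authorizer autorest.Authorizer) error {
	taskClient := batch.NewTaskClient(batchURL) // assumed generated constructor
	taskClient.Authorizer = authorizer

	// All optional arguments are left at their defaults: timeout falls back to the
	// 30-second server default, and no If-Match / If-None-Match / If-Modified-Since /
	// If-Unmodified-Since preconditions are sent.
	_, err := taskClient.Update(ctx, "my-job", "my-task",
		batch.TaskUpdateParameter{}, // fields (e.g. Constraints) omitted in this sketch
		nil, nil, nil, nil, "", "", nil, nil)
	return err
}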
diff --git a/services/batch/2019-08-01.10.0/batch/version.go b/services/batch/2019-08-01.10.0/batch/version.go
new file mode 100644
index 000000000000..17e030fe0cc3
--- /dev/null
+++ b/services/batch/2019-08-01.10.0/batch/version.go
@@ -0,0 +1,30 @@
+package batch
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " batch/2019-08-01.10.0"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
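
UserAgent and Version are the only exported helpers in the new version.go; a trivial sketch of reading them (the exact output depends on version.Number at build time):

package sample

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/2019-08-01.10.0/batch"
)

// printSDKInfo prints the user agent and SDK version strings defined in version.go.
func printSDKInfo() {
	fmt.Println(batch.UserAgent()) // "Azure-SDK-For-Go/<version.Number> batch/2019-08-01.10.0"
	fmt.Println(batch.Version())   // "<version.Number>"
}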
diff --git a/services/batch/mgmt/2019-08-01/batch/account.go b/services/batch/mgmt/2019-08-01/batch/account.go
new file mode 100644
index 000000000000..f565ba8f7ecb
--- /dev/null
+++ b/services/batch/mgmt/2019-08-01/batch/account.go
@@ -0,0 +1,884 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// AccountClient is the client for the Account methods of the Batch service.
+type AccountClient struct {
+ BaseClient
+}
+
+// NewAccountClient creates an instance of the AccountClient client.
+func NewAccountClient(subscriptionID string) AccountClient {
+ return NewAccountClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewAccountClientWithBaseURI creates an instance of the AccountClient client.
+func NewAccountClientWithBaseURI(baseURI string, subscriptionID string) AccountClient {
+ return AccountClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create creates a new Batch account with the specified parameters. Existing accounts cannot be updated with this API
+// and should instead be updated with the Update Batch Account API.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - a name for the Batch account which must be unique within the region. Batch account names must
+// be between 3 and 24 characters in length and must use only numbers and lowercase letters. This name is used
+// as part of the DNS name that is used to access the Batch service in the region in which the account is
+// created. For example: http://accountname.region.batch.azure.com/.
+// parameters - additional parameters for account creation.
+func (client AccountClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (result AccountCreateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountCreateProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountCreateProperties.AutoStorage", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountCreateProperties.AutoStorage.StorageAccountID", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.AccountCreateProperties.KeyVaultReference", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.AccountCreateProperties.KeyVaultReference.ID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.AccountCreateProperties.KeyVaultReference.URL", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.AccountClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "Create", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client AccountClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountClient) CreateSender(req *http.Request) (future AccountCreateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client AccountClient) CreateResponder(resp *http.Response) (result Account, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
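Create is a long-running operation: CreateSender converts the initial response into an AccountCreateFuture, and callers are expected to poll that future before reading the final Account. Below is a minimal sketch under stated assumptions: WaitForCompletionRef is promoted from the embedded azure.Future, the future's Result method lives in the generated models file (not shown in this hunk), and to.StringPtr comes from github.com/Azure/go-autorest/autorest/to.

package sample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/to"
)

// createAccount sketches the long-running Create flow above; the optional
// AccountCreateProperties are omitted, so only the required Location is set.
func createAccount(ctx context.Context, subscriptionID string, authorizer autorest.Authorizer) (batch.Account, error) {
	client := batch.NewAccountClient(subscriptionID)
	client.Authorizer = authorizer

	future, err := client.Create(ctx, "my-rg", "mybatchaccount",
		batch.AccountCreateParameters{Location: to.StringPtr("eastus")})
	if err != nil {
		return batch.Account{}, err
	}
	// Poll until the PUT completes, then read the final Account.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return batch.Account{}, err
	}
	return future.Result(client) // Result is assumed to be defined in the generated models
}
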
+// Delete deletes the specified Batch account.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+func (client AccountClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result AccountDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.AccountClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client AccountClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountClient) DeleteSender(req *http.Request) (future AccountDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client AccountClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets information about the specified Batch account.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+func (client AccountClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result Account, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.AccountClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client AccountClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client AccountClient) GetResponder(resp *http.Response) (result Account, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetKeys this operation applies only to Batch accounts created with a poolAllocationMode of 'BatchService'. If the
+// Batch account was created with a poolAllocationMode of 'UserSubscription', clients cannot use access keys to
+// authenticate, and must use Azure Active Directory instead. In this case, getting the keys will fail.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+func (client AccountClient) GetKeys(ctx context.Context, resourceGroupName string, accountName string) (result AccountKeys, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.GetKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.AccountClient", "GetKeys", err.Error())
+ }
+
+ req, err := client.GetKeysPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "GetKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetKeysSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "GetKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "GetKeys", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetKeysPreparer prepares the GetKeys request.
+func (client AccountClient) GetKeysPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/listKeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetKeysSender sends the GetKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountClient) GetKeysSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetKeysResponder handles the response to the GetKeys request. The method always
+// closes the http.Response Body.
+func (client AccountClient) GetKeysResponder(resp *http.Response) (result AccountKeys, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
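As the comment above notes, GetKeys only succeeds for accounts created with a poolAllocationMode of 'BatchService'. A minimal sketch, assuming an already-authorized AccountClient and that AccountKeys exposes Primary and Secondary *string fields (the field names are assumptions, not shown in this hunk):

package sample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch"
)

// listAccountKeys sketches the GetKeys call above for a 'BatchService'
// pool-allocation-mode account; for 'UserSubscription' accounts this call is
// expected to fail and Azure Active Directory must be used instead.
func listAccountKeys(ctx context.Context, client batch.AccountClient) error {
	keys, err := client.GetKeys(ctx, "my-rg", "mybatchaccount")
	if err != nil {
		return err
	}
	// Primary and Secondary are assumed *string fields on AccountKeys.
	fmt.Println(*keys.Primary, *keys.Secondary)
	return nil
}
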
+// List gets information about the Batch accounts associated with the subscription.
+func (client AccountClient) List(ctx context.Context) (result AccountListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.List")
+ defer func() {
+ sc := -1
+ if result.alr.Response.Response != nil {
+ sc = result.alr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.alr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.alr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client AccountClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Batch/batchAccounts", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client AccountClient) ListResponder(resp *http.Response) (result AccountListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client AccountClient) listNextResults(ctx context.Context, lastResults AccountListResult) (result AccountListResult, err error) {
+ req, err := lastResults.accountListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AccountClient) ListComplete(ctx context.Context) (result AccountListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
+
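List returns a single page (AccountListResultPage), while ListComplete wraps the same call in an iterator that follows the nextLink via listNextResults. A minimal sketch of the iterator pattern, assuming the standard generated iterator surface (NotDone, Value, NextWithContext) and a Name field on Account, neither of which appears in this hunk:

package sample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch"
)

// printAllAccounts walks every Batch account in the subscription, letting the
// iterator returned by ListComplete cross page boundaries automatically.
func printAllAccounts(ctx context.Context, client batch.AccountClient) error {
	iter, err := client.ListComplete(ctx)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		acct := iter.Value()
		if acct.Name != nil { // Name is assumed to come from the embedded resource type
			fmt.Println(*acct.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
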
+// ListByResourceGroup gets information about the Batch accounts associated with the specified resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+func (client AccountClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AccountListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.alr.Response.Response != nil {
+ sc = result.alr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByResourceGroupNextResults
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.alr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.alr, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client AccountClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client AccountClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client AccountClient) listByResourceGroupNextResults(ctx context.Context, lastResults AccountListResult) (result AccountListResult, err error) {
+ req, err := lastResults.accountListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.AccountClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AccountClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result AccountListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
+ return
+}
+
+// RegenerateKey regenerates the specified account key for the Batch account.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// parameters - the type of key to regenerate.
+func (client AccountClient) RegenerateKey(ctx context.Context, resourceGroupName string, accountName string, parameters AccountRegenerateKeyParameters) (result AccountKeys, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.RegenerateKey")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.AccountClient", "RegenerateKey", err.Error())
+ }
+
+ req, err := client.RegenerateKeyPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "RegenerateKey", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RegenerateKeySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "RegenerateKey", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RegenerateKeyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "RegenerateKey", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// RegenerateKeyPreparer prepares the RegenerateKey request.
+func (client AccountClient) RegenerateKeyPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountRegenerateKeyParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/regenerateKeys", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RegenerateKeySender sends the RegenerateKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountClient) RegenerateKeySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always
+// closes the http.Response Body.
+func (client AccountClient) RegenerateKeyResponder(resp *http.Response) (result AccountKeys, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// SynchronizeAutoStorageKeys synchronizes access keys for the auto-storage account configured for the specified Batch
+// account.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+func (client AccountClient) SynchronizeAutoStorageKeys(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.SynchronizeAutoStorageKeys")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.AccountClient", "SynchronizeAutoStorageKeys", err.Error())
+ }
+
+ req, err := client.SynchronizeAutoStorageKeysPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "SynchronizeAutoStorageKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.SynchronizeAutoStorageKeysSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "SynchronizeAutoStorageKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.SynchronizeAutoStorageKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "SynchronizeAutoStorageKeys", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// SynchronizeAutoStorageKeysPreparer prepares the SynchronizeAutoStorageKeys request.
+func (client AccountClient) SynchronizeAutoStorageKeysPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/syncAutoStorageKeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SynchronizeAutoStorageKeysSender sends the SynchronizeAutoStorageKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountClient) SynchronizeAutoStorageKeysSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// SynchronizeAutoStorageKeysResponder handles the response to the SynchronizeAutoStorageKeys request. The method always
+// closes the http.Response Body.
+func (client AccountClient) SynchronizeAutoStorageKeysResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Update updates the properties of an existing Batch account.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// parameters - additional parameters for account update.
+func (client AccountClient) Update(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.AccountClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client AccountClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client AccountClient) UpdateResponder(resp *http.Response) (result Account, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
diff --git a/services/batch/mgmt/2019-08-01/batch/application.go b/services/batch/mgmt/2019-08-01/batch/application.go
new file mode 100644
index 000000000000..7f1d2bf3e27b
--- /dev/null
+++ b/services/batch/mgmt/2019-08-01/batch/application.go
@@ -0,0 +1,541 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ApplicationClient is the client for the Application methods of the Batch service.
+type ApplicationClient struct {
+ BaseClient
+}
+
+// NewApplicationClient creates an instance of the ApplicationClient client.
+func NewApplicationClient(subscriptionID string) ApplicationClient {
+ return NewApplicationClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewApplicationClientWithBaseURI creates an instance of the ApplicationClient client.
+func NewApplicationClientWithBaseURI(baseURI string, subscriptionID string) ApplicationClient {
+ return ApplicationClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create adds an application to the specified Batch account.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// applicationName - the name of the application. This must be unique within the account.
+// parameters - the parameters for the request.
+func (client ApplicationClient) Create(ctx context.Context, resourceGroupName string, accountName string, applicationName string, parameters *Application) (result Application, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: applicationName,
+ Constraints: []validation.Constraint{{Target: "applicationName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "applicationName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "applicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ApplicationClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, applicationName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Create", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client ApplicationClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, applicationName string, parameters *Application) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "applicationName": autorest.Encode("path", applicationName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if parameters != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(parameters))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationClient) CreateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client ApplicationClient) CreateResponder(resp *http.Response) (result Application, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes an application.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// applicationName - the name of the application. This must be unique within the account.
+func (client ApplicationClient) Delete(ctx context.Context, resourceGroupName string, accountName string, applicationName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: applicationName,
+ Constraints: []validation.Constraint{{Target: "applicationName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "applicationName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "applicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ApplicationClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, applicationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ApplicationClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, applicationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "applicationName": autorest.Encode("path", applicationName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ApplicationClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets information about the specified application.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// applicationName - the name of the application. This must be unique within the account.
+func (client ApplicationClient) Get(ctx context.Context, resourceGroupName string, accountName string, applicationName string) (result Application, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: applicationName,
+ Constraints: []validation.Constraint{{Target: "applicationName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "applicationName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "applicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ApplicationClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, applicationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ApplicationClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, applicationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "applicationName": autorest.Encode("path", applicationName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ApplicationClient) GetResponder(resp *http.Response) (result Application, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all of the applications in the specified account.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// maxresults - the maximum number of items to return in the response.
+func (client ApplicationClient) List(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32) (result ListApplicationsResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.List")
+ defer func() {
+ sc := -1
+ if result.lar.Response.Response != nil {
+ sc = result.lar.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ApplicationClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName, maxresults)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.lar.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.lar, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ApplicationClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if maxresults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ApplicationClient) ListResponder(resp *http.Response) (result ListApplicationsResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client ApplicationClient) listNextResults(ctx context.Context, lastResults ListApplicationsResult) (result ListApplicationsResult, err error) {
+ req, err := lastResults.listApplicationsResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.ApplicationClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.ApplicationClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ApplicationClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32) (result ListApplicationsResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, accountName, maxresults)
+ return
+}
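+
+// exampleListAllApplications is an illustrative sketch (editorial addition, not generated
+// code): it shows the intended use of ListComplete, walking every page via the iterator's
+// NotDone/Value/NextWithContext methods and collecting the application names. The resource
+// group and account names are placeholders supplied by the caller.
+func exampleListAllApplications(ctx context.Context, client ApplicationClient, resourceGroupName string, accountName string) ([]string, error) {
+	var names []string
+	iter, err := client.ListComplete(ctx, resourceGroupName, accountName, nil)
+	if err != nil {
+		return nil, err
+	}
+	for iter.NotDone() {
+		app := iter.Value()
+		if app.Name != nil {
+			names = append(names, *app.Name)
+		}
+		// Advance to the next item, transparently fetching the next page when needed.
+		if err := iter.NextWithContext(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return names, nil
+}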
+
+// Update updates settings for the specified application.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// applicationName - the name of the application. This must be unique within the account.
+// parameters - the parameters for the request.
+func (client ApplicationClient) Update(ctx context.Context, resourceGroupName string, accountName string, applicationName string, parameters Application) (result Application, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: applicationName,
+ Constraints: []validation.Constraint{{Target: "applicationName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "applicationName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "applicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ApplicationClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, applicationName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client ApplicationClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, applicationName string, parameters Application) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "applicationName": autorest.Encode("path", applicationName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client ApplicationClient) UpdateResponder(resp *http.Response) (result Application, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
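+
+// exampleGetApplication is an illustrative sketch (editorial addition, not generated code):
+// it wires up an ApplicationClient with the usual generated constructor, attaches a
+// caller-supplied authorizer (for example one built with the autorest/azure/auth helpers),
+// and fetches a single application. The resource group, account and application names are
+// placeholders.
+func exampleGetApplication(ctx context.Context, authorizer autorest.Authorizer, subscriptionID string) (Application, error) {
+	client := NewApplicationClient(subscriptionID)
+	client.Authorizer = authorizer
+	return client.Get(ctx, "myResourceGroup", "mybatchaccount", "myapp")
+}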
diff --git a/services/batch/mgmt/2019-08-01/batch/applicationpackage.go b/services/batch/mgmt/2019-08-01/batch/applicationpackage.go
new file mode 100644
index 000000000000..a210f623ea73
--- /dev/null
+++ b/services/batch/mgmt/2019-08-01/batch/applicationpackage.go
@@ -0,0 +1,573 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ApplicationPackageClient is the client for the ApplicationPackage methods of the Batch service.
+type ApplicationPackageClient struct {
+ BaseClient
+}
+
+// NewApplicationPackageClient creates an instance of the ApplicationPackageClient client.
+func NewApplicationPackageClient(subscriptionID string) ApplicationPackageClient {
+ return NewApplicationPackageClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewApplicationPackageClientWithBaseURI creates an instance of the ApplicationPackageClient client.
+func NewApplicationPackageClientWithBaseURI(baseURI string, subscriptionID string) ApplicationPackageClient {
+ return ApplicationPackageClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Activate activates the specified application package.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// applicationName - the name of the application. This must be unique within the account.
+// versionName - the version of the application.
+// parameters - the parameters for the request.
+func (client ApplicationPackageClient) Activate(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string, parameters ActivateApplicationPackageParameters) (result ApplicationPackage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationPackageClient.Activate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: applicationName,
+ Constraints: []validation.Constraint{{Target: "applicationName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "applicationName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "applicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}},
+ {TargetValue: versionName,
+ Constraints: []validation.Constraint{{Target: "versionName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "versionName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "versionName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-][a-zA-Z0-9_.-]*$`, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.Format", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ApplicationPackageClient", "Activate", err.Error())
+ }
+
+ req, err := client.ActivatePreparer(ctx, resourceGroupName, accountName, applicationName, versionName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Activate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ActivateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Activate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ActivateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Activate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ActivatePreparer prepares the Activate request.
+func (client ApplicationPackageClient) ActivatePreparer(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string, parameters ActivateApplicationPackageParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "applicationName": autorest.Encode("path", applicationName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "versionName": autorest.Encode("path", versionName),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}/versions/{versionName}/activate", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ActivateSender sends the Activate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationPackageClient) ActivateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ActivateResponder handles the response to the Activate request. The method always
+// closes the http.Response Body.
+func (client ApplicationPackageClient) ActivateResponder(resp *http.Response) (result ApplicationPackage, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Create creates an application package record.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// applicationName - the name of the application. This must be unique within the account.
+// versionName - the version of the application.
+// parameters - the parameters for the request.
+func (client ApplicationPackageClient) Create(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string, parameters *ApplicationPackage) (result ApplicationPackage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationPackageClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: applicationName,
+ Constraints: []validation.Constraint{{Target: "applicationName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "applicationName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "applicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}},
+ {TargetValue: versionName,
+ Constraints: []validation.Constraint{{Target: "versionName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "versionName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "versionName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-][a-zA-Z0-9_.-]*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ApplicationPackageClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, applicationName, versionName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Create", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client ApplicationPackageClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string, parameters *ApplicationPackage) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "applicationName": autorest.Encode("path", applicationName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "versionName": autorest.Encode("path", versionName),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}/versions/{versionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if parameters != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(parameters))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationPackageClient) CreateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client ApplicationPackageClient) CreateResponder(resp *http.Response) (result ApplicationPackage, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes an application package record and its associated binary file.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// applicationName - the name of the application. This must be unique within the account.
+// versionName - the version of the application.
+func (client ApplicationPackageClient) Delete(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationPackageClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: applicationName,
+ Constraints: []validation.Constraint{{Target: "applicationName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "applicationName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "applicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}},
+ {TargetValue: versionName,
+ Constraints: []validation.Constraint{{Target: "versionName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "versionName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "versionName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-][a-zA-Z0-9_.-]*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ApplicationPackageClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, applicationName, versionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ApplicationPackageClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "applicationName": autorest.Encode("path", applicationName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "versionName": autorest.Encode("path", versionName),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}/versions/{versionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationPackageClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ApplicationPackageClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets information about the specified application package.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// applicationName - the name of the application. This must be unique within the account.
+// versionName - the version of the application.
+func (client ApplicationPackageClient) Get(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string) (result ApplicationPackage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationPackageClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: applicationName,
+ Constraints: []validation.Constraint{{Target: "applicationName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "applicationName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "applicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}},
+ {TargetValue: versionName,
+ Constraints: []validation.Constraint{{Target: "versionName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "versionName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "versionName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-][a-zA-Z0-9_.-]*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ApplicationPackageClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, applicationName, versionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ApplicationPackageClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "applicationName": autorest.Encode("path", applicationName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "versionName": autorest.Encode("path", versionName),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}/versions/{versionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationPackageClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ApplicationPackageClient) GetResponder(resp *http.Response) (result ApplicationPackage, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all of the application packages in the specified application.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// applicationName - the name of the application. This must be unique within the account.
+// maxresults - the maximum number of items to return in the response.
+func (client ApplicationPackageClient) List(ctx context.Context, resourceGroupName string, accountName string, applicationName string, maxresults *int32) (result ListApplicationPackagesResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationPackageClient.List")
+ defer func() {
+ sc := -1
+ if result.lapr.Response.Response != nil {
+ sc = result.lapr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: applicationName,
+ Constraints: []validation.Constraint{{Target: "applicationName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "applicationName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "applicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.ApplicationPackageClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName, applicationName, maxresults)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.lapr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.lapr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ApplicationPackageClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, applicationName string, maxresults *int32) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "applicationName": autorest.Encode("path", applicationName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if maxresults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}/versions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationPackageClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ApplicationPackageClient) ListResponder(resp *http.Response) (result ListApplicationPackagesResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client ApplicationPackageClient) listNextResults(ctx context.Context, lastResults ListApplicationPackagesResult) (result ListApplicationPackagesResult, err error) {
+ req, err := lastResults.listApplicationPackagesResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.ApplicationPackageClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ApplicationPackageClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, applicationName string, maxresults *int32) (result ListApplicationPackagesResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationPackageClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, accountName, applicationName, maxresults)
+ return
+}
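+
+// exampleUploadAndActivatePackage is an illustrative sketch (editorial addition, not generated
+// code) of the three-step application package workflow: create the package record, upload the
+// package zip to the storage URL returned on the record (the blob upload itself is omitted
+// here), then activate the version so pools can reference it. The StorageURL property and the
+// "zip" Format value are assumptions based on this SDK generation's models; all resource names
+// are placeholders.
+func exampleUploadAndActivatePackage(ctx context.Context, client ApplicationPackageClient) error {
+	const (
+		resourceGroupName = "myResourceGroup"
+		accountName       = "mybatchaccount"
+		applicationName   = "myapp"
+		versionName       = "1.0"
+	)
+	// Step 1: create the package record; the response carries a writable storage URL.
+	pkg, err := client.Create(ctx, resourceGroupName, accountName, applicationName, versionName, nil)
+	if err != nil {
+		return err
+	}
+	// Step 2 (omitted): upload the application zip to *pkg.StorageURL with any blob client.
+	_ = pkg
+	// Step 3: mark the uploaded package as active.
+	format := "zip"
+	_, err = client.Activate(ctx, resourceGroupName, accountName, applicationName, versionName,
+		ActivateApplicationPackageParameters{Format: &format})
+	return err
+}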
diff --git a/services/batch/mgmt/2019-08-01/batch/batchapi/interfaces.go b/services/batch/mgmt/2019-08-01/batch/batchapi/interfaces.go
new file mode 100644
index 000000000000..d57a7236370f
--- /dev/null
+++ b/services/batch/mgmt/2019-08-01/batch/batchapi/interfaces.go
@@ -0,0 +1,101 @@
+package batchapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch"
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// AccountClientAPI contains the set of methods on the AccountClient type.
+type AccountClientAPI interface {
+ Create(ctx context.Context, resourceGroupName string, accountName string, parameters batch.AccountCreateParameters) (result batch.AccountCreateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, accountName string) (result batch.AccountDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, accountName string) (result batch.Account, err error)
+ GetKeys(ctx context.Context, resourceGroupName string, accountName string) (result batch.AccountKeys, err error)
+ List(ctx context.Context) (result batch.AccountListResultPage, err error)
+ ListByResourceGroup(ctx context.Context, resourceGroupName string) (result batch.AccountListResultPage, err error)
+ RegenerateKey(ctx context.Context, resourceGroupName string, accountName string, parameters batch.AccountRegenerateKeyParameters) (result batch.AccountKeys, err error)
+ SynchronizeAutoStorageKeys(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error)
+ Update(ctx context.Context, resourceGroupName string, accountName string, parameters batch.AccountUpdateParameters) (result batch.Account, err error)
+}
+
+var _ AccountClientAPI = (*batch.AccountClient)(nil)
+
+// ApplicationPackageClientAPI contains the set of methods on the ApplicationPackageClient type.
+type ApplicationPackageClientAPI interface {
+ Activate(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string, parameters batch.ActivateApplicationPackageParameters) (result batch.ApplicationPackage, err error)
+ Create(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string, parameters *batch.ApplicationPackage) (result batch.ApplicationPackage, err error)
+ Delete(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, accountName string, applicationName string, versionName string) (result batch.ApplicationPackage, err error)
+ List(ctx context.Context, resourceGroupName string, accountName string, applicationName string, maxresults *int32) (result batch.ListApplicationPackagesResultPage, err error)
+}
+
+var _ ApplicationPackageClientAPI = (*batch.ApplicationPackageClient)(nil)
+
+// ApplicationClientAPI contains the set of methods on the ApplicationClient type.
+type ApplicationClientAPI interface {
+ Create(ctx context.Context, resourceGroupName string, accountName string, applicationName string, parameters *batch.Application) (result batch.Application, err error)
+ Delete(ctx context.Context, resourceGroupName string, accountName string, applicationName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, accountName string, applicationName string) (result batch.Application, err error)
+ List(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32) (result batch.ListApplicationsResultPage, err error)
+ Update(ctx context.Context, resourceGroupName string, accountName string, applicationName string, parameters batch.Application) (result batch.Application, err error)
+}
+
+var _ ApplicationClientAPI = (*batch.ApplicationClient)(nil)
+
+// LocationClientAPI contains the set of methods on the LocationClient type.
+type LocationClientAPI interface {
+ CheckNameAvailability(ctx context.Context, locationName string, parameters batch.CheckNameAvailabilityParameters) (result batch.CheckNameAvailabilityResult, err error)
+ GetQuotas(ctx context.Context, locationName string) (result batch.LocationQuota, err error)
+}
+
+var _ LocationClientAPI = (*batch.LocationClient)(nil)
+
+// OperationsClientAPI contains the set of methods on the OperationsClient type.
+type OperationsClientAPI interface {
+ List(ctx context.Context) (result batch.OperationListResultPage, err error)
+}
+
+var _ OperationsClientAPI = (*batch.OperationsClient)(nil)
+
+// CertificateClientAPI contains the set of methods on the CertificateClient type.
+type CertificateClientAPI interface {
+ CancelDeletion(ctx context.Context, resourceGroupName string, accountName string, certificateName string) (result batch.Certificate, err error)
+ Create(ctx context.Context, resourceGroupName string, accountName string, certificateName string, parameters batch.CertificateCreateOrUpdateParameters, ifMatch string, ifNoneMatch string) (result batch.CertificateCreateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, accountName string, certificateName string) (result batch.CertificateDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, accountName string, certificateName string) (result batch.Certificate, err error)
+ ListByBatchAccount(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32, selectParameter string, filter string) (result batch.ListCertificatesResultPage, err error)
+ Update(ctx context.Context, resourceGroupName string, accountName string, certificateName string, parameters batch.CertificateCreateOrUpdateParameters, ifMatch string) (result batch.Certificate, err error)
+}
+
+var _ CertificateClientAPI = (*batch.CertificateClient)(nil)
+
+// PoolClientAPI contains the set of methods on the PoolClient type.
+type PoolClientAPI interface {
+ Create(ctx context.Context, resourceGroupName string, accountName string, poolName string, parameters batch.Pool, ifMatch string, ifNoneMatch string) (result batch.PoolCreateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, accountName string, poolName string) (result batch.PoolDeleteFuture, err error)
+ DisableAutoScale(ctx context.Context, resourceGroupName string, accountName string, poolName string) (result batch.Pool, err error)
+ Get(ctx context.Context, resourceGroupName string, accountName string, poolName string) (result batch.Pool, err error)
+ ListByBatchAccount(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32, selectParameter string, filter string) (result batch.ListPoolsResultPage, err error)
+ StopResize(ctx context.Context, resourceGroupName string, accountName string, poolName string) (result batch.Pool, err error)
+ Update(ctx context.Context, resourceGroupName string, accountName string, poolName string, parameters batch.Pool, ifMatch string) (result batch.Pool, err error)
+}
+
+var _ PoolClientAPI = (*batch.PoolClient)(nil)
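+
+// Illustrative sketch (editorial addition, not generated code): the interfaces above exist so
+// that callers can depend on the *ClientAPI types and substitute fakes in tests. In practice a
+// fake like the hypothetical one below lives in the consumer's test code; it is shown here only
+// to make the intent concrete. Embedding the interface means only the methods a test actually
+// exercises need bodies; calling any other method panics on the nil embedded interface.
+type fakeApplicationClient struct {
+	ApplicationClientAPI
+	canned batch.Application
+}
+
+// Get returns the canned application instead of calling the service.
+func (f fakeApplicationClient) Get(ctx context.Context, resourceGroupName string, accountName string, applicationName string) (batch.Application, error) {
+	return f.canned, nil
+}
+
+// Compile-time check that the fake still satisfies the interface.
+var _ ApplicationClientAPI = fakeApplicationClient{}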
diff --git a/services/batch/mgmt/2019-08-01/batch/certificate.go b/services/batch/mgmt/2019-08-01/batch/certificate.go
new file mode 100644
index 000000000000..c3ae0c699876
--- /dev/null
+++ b/services/batch/mgmt/2019-08-01/batch/certificate.go
@@ -0,0 +1,669 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// CertificateClient is the client for the Certificate methods of the Batch service.
+type CertificateClient struct {
+ BaseClient
+}
+
+// NewCertificateClient creates an instance of the CertificateClient client.
+func NewCertificateClient(subscriptionID string) CertificateClient {
+ return NewCertificateClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewCertificateClientWithBaseURI creates an instance of the CertificateClient client.
+func NewCertificateClientWithBaseURI(baseURI string, subscriptionID string) CertificateClient {
+ return CertificateClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CancelDeletion if you try to delete a certificate that is being used by a pool or compute node, the status of the
+// certificate changes to deleteFailed. If you decide that you want to continue using the certificate, you can use this
+// operation to set the status of the certificate back to active. If you intend to delete the certificate, you do not
+// need to run this operation after the deletion failed. You must make sure that the certificate is not being used by
+// any resources, and then you can try again to delete the certificate.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// certificateName - the identifier for the certificate. This must be made up of algorithm and thumbprint
+// separated by a dash, and must match the certificate data in the request. For example SHA1-a3d1c5.
+func (client CertificateClient) CancelDeletion(ctx context.Context, resourceGroupName string, accountName string, certificateName string) (result Certificate, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.CancelDeletion")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: certificateName,
+ Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.MaxLength, Rule: 45, Chain: nil},
+ {Target: "certificateName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "certificateName", Name: validation.Pattern, Rule: `^[\w]+-[\w]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.CertificateClient", "CancelDeletion", err.Error())
+ }
+
+ req, err := client.CancelDeletionPreparer(ctx, resourceGroupName, accountName, certificateName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "CancelDeletion", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CancelDeletionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "CancelDeletion", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CancelDeletionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "CancelDeletion", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CancelDeletionPreparer prepares the CancelDeletion request.
+func (client CertificateClient) CancelDeletionPreparer(ctx context.Context, resourceGroupName string, accountName string, certificateName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "certificateName": autorest.Encode("path", certificateName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}/cancelDelete", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CancelDeletionSender sends the CancelDeletion request. The method will close the
+// http.Response Body if it receives an error.
+func (client CertificateClient) CancelDeletionSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CancelDeletionResponder handles the response to the CancelDeletion request. The method always
+// closes the http.Response Body.
+func (client CertificateClient) CancelDeletionResponder(resp *http.Response) (result Certificate, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Create creates a new certificate inside the specified account.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// certificateName - the identifier for the certificate. This must be made up of algorithm and thumbprint
+// separated by a dash, and must match the certificate data in the request. For example SHA1-a3d1c5.
+// parameters - additional parameters for certificate creation.
+// ifMatch - the entity state (ETag) version of the certificate to update. A value of "*" can be used to apply
+// the operation only if the certificate already exists. If omitted, this operation will always be applied.
+// ifNoneMatch - set to '*' to allow a new certificate to be created, but to prevent updating an existing
+// certificate. Other values will be ignored.
+func (client CertificateClient) Create(ctx context.Context, resourceGroupName string, accountName string, certificateName string, parameters CertificateCreateOrUpdateParameters, ifMatch string, ifNoneMatch string) (result CertificateCreateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: certificateName,
+ Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.MaxLength, Rule: 45, Chain: nil},
+ {Target: "certificateName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "certificateName", Name: validation.Pattern, Rule: `^[\w]+-[\w]+$`, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.CertificateCreateOrUpdateProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.CertificateCreateOrUpdateProperties.Data", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("batch.CertificateClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, certificateName, parameters, ifMatch, ifNoneMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Create", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client CertificateClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, certificateName string, parameters CertificateCreateOrUpdateParameters, ifMatch string, ifNoneMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "certificateName": autorest.Encode("path", certificateName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client CertificateClient) CreateSender(req *http.Request) (future CertificateCreateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client CertificateClient) CreateResponder(resp *http.Response) (result Certificate, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
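+
+// Editor's illustrative sketch (not generated code): a typical call to Create,
+// waiting for the long-running operation and decoding the final Certificate.
+// The client is assumed to already carry an Authorizer; the resource group,
+// account name, certificate name and base64 data are placeholder values, Data
+// being a *string on CertificateCreateOrUpdateProperties is inferred from the
+// validation constraint above, and future.Result is assumed to mirror
+// AccountCreateFuture.Result in models.go.
+// Assumed imports: context, github.com/Azure/go-autorest/autorest/to.
+func createCertificateExample(ctx context.Context, client CertificateClient) (Certificate, error) {
+	params := CertificateCreateOrUpdateParameters{
+		CertificateCreateOrUpdateProperties: &CertificateCreateOrUpdateProperties{
+			// Base64-encoded certificate content; Data is the only field the
+			// validation above marks as required.
+			Data: to.StringPtr("MIIC...base64..."),
+		},
+	}
+	// Empty ifMatch/ifNoneMatch values mean no conditional headers are sent.
+	future, err := client.Create(ctx, "myResourceGroup", "mybatchaccount", "SHA1-a3d1c5", params, "", "")
+	if err != nil {
+		return Certificate{}, err
+	}
+	// Block until the service reports completion, then fetch the result.
+	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+		return Certificate{}, err
+	}
+	return future.Result(client)
+}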
+
+// Delete deletes the specified certificate.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// certificateName - the identifier for the certificate. This must be made up of algorithm and thumbprint
+// separated by a dash, and must match the certificate data in the request. For example SHA1-a3d1c5.
+func (client CertificateClient) Delete(ctx context.Context, resourceGroupName string, accountName string, certificateName string) (result CertificateDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: certificateName,
+ Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.MaxLength, Rule: 45, Chain: nil},
+ {Target: "certificateName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "certificateName", Name: validation.Pattern, Rule: `^[\w]+-[\w]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.CertificateClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, certificateName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client CertificateClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, certificateName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "certificateName": autorest.Encode("path", certificateName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client CertificateClient) DeleteSender(req *http.Request) (future CertificateDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client CertificateClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
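+
+// Editor's illustrative sketch (not generated code): deleting a certificate and
+// blocking until the asynchronous operation finishes via WaitForCompletionRef,
+// which is promoted from the embedded azure.Future. Resource names are
+// placeholders. Assumed import: context.
+func deleteCertificateExample(ctx context.Context, client CertificateClient) error {
+	future, err := client.Delete(ctx, "myResourceGroup", "mybatchaccount", "SHA1-a3d1c5")
+	if err != nil {
+		return err
+	}
+	// The delete is accepted asynchronously (200/202/204); wait for it to finish.
+	return future.WaitForCompletionRef(ctx, client.Client)
+}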
+
+// Get gets information about the specified certificate.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// certificateName - the identifier for the certificate. This must be made up of algorithm and thumbprint
+// separated by a dash, and must match the certificate data in the request. For example SHA1-a3d1c5.
+func (client CertificateClient) Get(ctx context.Context, resourceGroupName string, accountName string, certificateName string) (result Certificate, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: certificateName,
+ Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.MaxLength, Rule: 45, Chain: nil},
+ {Target: "certificateName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "certificateName", Name: validation.Pattern, Rule: `^[\w]+-[\w]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.CertificateClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, certificateName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client CertificateClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, certificateName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "certificateName": autorest.Encode("path", certificateName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client CertificateClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client CertificateClient) GetResponder(resp *http.Response) (result Certificate, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
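+
+// Editor's illustrative sketch (not generated code): Get is synchronous, so the
+// Certificate (with its embedded autorest.Response) comes back directly.
+// Resource names are placeholders. Assumed imports: context, fmt.
+func getCertificateExample(ctx context.Context, client CertificateClient) error {
+	cert, err := client.Get(ctx, "myResourceGroup", "mybatchaccount", "SHA1-a3d1c5")
+	if err != nil {
+		return err
+	}
+	// The embedded autorest.Response exposes the raw *http.Response.
+	fmt.Println("GET returned status", cert.Response.StatusCode)
+	return nil
+}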
+
+// ListByBatchAccount lists all of the certificates in the specified account.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// maxresults - the maximum number of items to return in the response.
+// selectParameter - a comma-separated list of properties that should be returned, e.g.
+// "properties/provisioningState". Only top-level properties under properties/ are valid for selection.
+// filter - an OData filter expression. Valid properties for filtering are "properties/provisioningState",
+// "properties/provisioningStateTransitionTime", "name".
+func (client CertificateClient) ListByBatchAccount(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32, selectParameter string, filter string) (result ListCertificatesResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.ListByBatchAccount")
+ defer func() {
+ sc := -1
+ if result.lcr.Response.Response != nil {
+ sc = result.lcr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.CertificateClient", "ListByBatchAccount", err.Error())
+ }
+
+ result.fn = client.listByBatchAccountNextResults
+ req, err := client.ListByBatchAccountPreparer(ctx, resourceGroupName, accountName, maxresults, selectParameter, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "ListByBatchAccount", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByBatchAccountSender(req)
+ if err != nil {
+ result.lcr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "ListByBatchAccount", resp, "Failure sending request")
+ return
+ }
+
+ result.lcr, err = client.ListByBatchAccountResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "ListByBatchAccount", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByBatchAccountPreparer prepares the ListByBatchAccount request.
+func (client CertificateClient) ListByBatchAccountPreparer(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32, selectParameter string, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if maxresults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByBatchAccountSender sends the ListByBatchAccount request. The method will close the
+// http.Response Body if it receives an error.
+func (client CertificateClient) ListByBatchAccountSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByBatchAccountResponder handles the response to the ListByBatchAccount request. The method always
+// closes the http.Response Body.
+func (client CertificateClient) ListByBatchAccountResponder(resp *http.Response) (result ListCertificatesResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByBatchAccountNextResults retrieves the next set of results, if any.
+func (client CertificateClient) listByBatchAccountNextResults(ctx context.Context, lastResults ListCertificatesResult) (result ListCertificatesResult, err error) {
+ req, err := lastResults.listCertificatesResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.CertificateClient", "listByBatchAccountNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBatchAccountSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.CertificateClient", "listByBatchAccountNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBatchAccountResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "listByBatchAccountNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBatchAccountComplete enumerates all values, automatically crossing page boundaries as required.
+func (client CertificateClient) ListByBatchAccountComplete(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32, selectParameter string, filter string) (result ListCertificatesResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.ListByBatchAccount")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBatchAccount(ctx, resourceGroupName, accountName, maxresults, selectParameter, filter)
+ return
+}
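+
+// Editor's illustrative sketch (not generated code): enumerating every
+// certificate in an account with the Complete iterator, which crosses page
+// boundaries automatically. The iterator is assumed to expose the same
+// NotDone/Value/NextWithContext surface as AccountListResultIterator in
+// models.go, and Certificate.ID is an assumed field; resource names are
+// placeholders. Assumed imports: context, fmt.
+func listCertificatesExample(ctx context.Context, client CertificateClient) error {
+	it, err := client.ListByBatchAccountComplete(ctx, "myResourceGroup", "mybatchaccount", nil, "", "")
+	if err != nil {
+		return err
+	}
+	for it.NotDone() {
+		if id := it.Value().ID; id != nil {
+			fmt.Println(*id)
+		}
+		if err := it.NextWithContext(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}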
+
+// Update updates the properties of an existing certificate.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// certificateName - the identifier for the certificate. This must be made up of algorithm and thumbprint
+// separated by a dash, and must match the certificate data in the request. For example SHA1-a3d1c5.
+// parameters - certificate entity to update.
+// ifMatch - the entity state (ETag) version of the certificate to update. This value can be omitted or set to
+// "*" to apply the operation unconditionally.
+func (client CertificateClient) Update(ctx context.Context, resourceGroupName string, accountName string, certificateName string, parameters CertificateCreateOrUpdateParameters, ifMatch string) (result Certificate, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CertificateClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: certificateName,
+ Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.MaxLength, Rule: 45, Chain: nil},
+ {Target: "certificateName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "certificateName", Name: validation.Pattern, Rule: `^[\w]+-[\w]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.CertificateClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, certificateName, parameters, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client CertificateClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, certificateName string, parameters CertificateCreateOrUpdateParameters, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "certificateName": autorest.Encode("path", certificateName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client CertificateClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client CertificateClient) UpdateResponder(resp *http.Response) (result Certificate, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
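+
+// Editor's illustrative sketch (not generated code): updating a certificate only
+// when the caller's copy is still current, by passing the ETag from a previous
+// read as ifMatch; pass "" (or "*") to update unconditionally. Resource names,
+// the placeholder data and Data being a *string are assumptions.
+// Assumed imports: context, github.com/Azure/go-autorest/autorest/to.
+func updateCertificateExample(ctx context.Context, client CertificateClient, etag string) (Certificate, error) {
+	params := CertificateCreateOrUpdateParameters{
+		CertificateCreateOrUpdateProperties: &CertificateCreateOrUpdateProperties{
+			// Replacement base64-encoded certificate data (placeholder value).
+			Data: to.StringPtr("MIIC...base64..."),
+		},
+	}
+	return client.Update(ctx, "myResourceGroup", "mybatchaccount", "SHA1-a3d1c5", params, etag)
+}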
diff --git a/services/batch/mgmt/2019-08-01/batch/client.go b/services/batch/mgmt/2019-08-01/batch/client.go
new file mode 100644
index 000000000000..ce20ef8148f4
--- /dev/null
+++ b/services/batch/mgmt/2019-08-01/batch/client.go
@@ -0,0 +1,51 @@
+// Package batch implements the Azure ARM Batch service API version 2019-08-01.
+//
+//
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Batch
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Batch.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
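+
+// Editor's illustrative sketch (not generated code): wiring credentials into an
+// operation client. NewCertificateClient is assumed to follow the same
+// constructor pattern as NewLocationClient in location.go, and the auth helper
+// comes from github.com/Azure/go-autorest/autorest/azure/auth; any
+// autorest.Authorizer can be assigned instead.
+func newAuthorizedCertificateClient(subscriptionID string) (CertificateClient, error) {
+	client := NewCertificateClient(subscriptionID)
+	// Builds an Authorizer from environment variables (client secret,
+	// certificate, username/password or managed identity).
+	authorizer, err := auth.NewAuthorizerFromEnvironment()
+	if err != nil {
+		return CertificateClient{}, err
+	}
+	client.Authorizer = authorizer
+	return client, nil
+}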
diff --git a/services/batch/mgmt/2019-08-01/batch/location.go b/services/batch/mgmt/2019-08-01/batch/location.go
new file mode 100644
index 000000000000..16a584ad4fd0
--- /dev/null
+++ b/services/batch/mgmt/2019-08-01/batch/location.go
@@ -0,0 +1,202 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// LocationClient is the client for the Location methods of the Batch service.
+type LocationClient struct {
+ BaseClient
+}
+
+// NewLocationClient creates an instance of the LocationClient client.
+func NewLocationClient(subscriptionID string) LocationClient {
+ return NewLocationClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewLocationClientWithBaseURI creates an instance of the LocationClient client.
+func NewLocationClientWithBaseURI(baseURI string, subscriptionID string) LocationClient {
+ return LocationClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CheckNameAvailability checks whether the Batch account name is available in the specified region.
+// Parameters:
+// locationName - the desired region for the name check.
+// parameters - properties needed to check the availability of a name.
+func (client LocationClient) CheckNameAvailability(ctx context.Context, locationName string, parameters CheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocationClient.CheckNameAvailability")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.Name", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.LocationClient", "CheckNameAvailability", err.Error())
+ }
+
+ req, err := client.CheckNameAvailabilityPreparer(ctx, locationName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.LocationClient", "CheckNameAvailability", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CheckNameAvailabilitySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.LocationClient", "CheckNameAvailability", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CheckNameAvailabilityResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.LocationClient", "CheckNameAvailability", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request.
+func (client LocationClient) CheckNameAvailabilityPreparer(ctx context.Context, locationName string, parameters CheckNameAvailabilityParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "locationName": autorest.Encode("path", locationName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Batch/locations/{locationName}/checkNameAvailability", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocationClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always
+// closes the http.Response Body.
+func (client LocationClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
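+
+// Editor's illustrative sketch (not generated code): probing whether an account
+// name can be used in a region. Name and Type follow the validation constraints
+// above and are assumed to be *string; the resource type literal, the region and
+// the NameAvailable/Reason result fields are assumptions.
+// Assumed imports: context, fmt, github.com/Azure/go-autorest/autorest/to.
+func checkNameAvailabilityExample(ctx context.Context, client LocationClient) error {
+	params := CheckNameAvailabilityParameters{
+		Name: to.StringPtr("mybatchaccount"),
+		Type: to.StringPtr("Microsoft.Batch/batchAccounts"),
+	}
+	res, err := client.CheckNameAvailability(ctx, "westus", params)
+	if err != nil {
+		return err
+	}
+	if res.NameAvailable != nil && !*res.NameAvailable {
+		fmt.Println("name unavailable:", res.Reason)
+	}
+	return nil
+}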
+
+// GetQuotas gets the Batch service quotas for the specified subscription at the given location.
+// Parameters:
+// locationName - the region for which to retrieve Batch service quotas.
+func (client LocationClient) GetQuotas(ctx context.Context, locationName string) (result LocationQuota, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocationClient.GetQuotas")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetQuotasPreparer(ctx, locationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.LocationClient", "GetQuotas", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetQuotasSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.LocationClient", "GetQuotas", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetQuotasResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.LocationClient", "GetQuotas", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetQuotasPreparer prepares the GetQuotas request.
+func (client LocationClient) GetQuotasPreparer(ctx context.Context, locationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "locationName": autorest.Encode("path", locationName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Batch/locations/{locationName}/quotas", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetQuotasSender sends the GetQuotas request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocationClient) GetQuotasSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetQuotasResponder handles the response to the GetQuotas request. The method always
+// closes the http.Response Body.
+func (client LocationClient) GetQuotasResponder(resp *http.Response) (result LocationQuota, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
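+
+// Editor's illustrative sketch (not generated code): reading the per-region
+// Batch account quota for the subscription. The AccountQuota field on
+// LocationQuota and the region name are assumptions.
+// Assumed imports: context, fmt.
+func getQuotasExample(ctx context.Context, client LocationClient) error {
+	quota, err := client.GetQuotas(ctx, "westus")
+	if err != nil {
+		return err
+	}
+	if quota.AccountQuota != nil {
+		fmt.Println("Batch accounts allowed in this region:", *quota.AccountQuota)
+	}
+	return nil
+}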
diff --git a/services/batch/mgmt/2019-08-01/batch/models.go b/services/batch/mgmt/2019-08-01/batch/models.go
new file mode 100644
index 000000000000..45ff56b28ffc
--- /dev/null
+++ b/services/batch/mgmt/2019-08-01/batch/models.go
@@ -0,0 +1,2780 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch"
+
+// AccountKeyType enumerates the values for account key type.
+type AccountKeyType string
+
+const (
+ // Primary The primary account key.
+ Primary AccountKeyType = "Primary"
+ // Secondary The secondary account key.
+ Secondary AccountKeyType = "Secondary"
+)
+
+// PossibleAccountKeyTypeValues returns an array of possible values for the AccountKeyType const type.
+func PossibleAccountKeyTypeValues() []AccountKeyType {
+ return []AccountKeyType{Primary, Secondary}
+}
+
+// AllocationState enumerates the values for allocation state.
+type AllocationState string
+
+const (
+ // Resizing The pool is resizing; that is, compute nodes are being added to or removed from the pool.
+ Resizing AllocationState = "Resizing"
+ // Steady The pool is not resizing. There are no changes to the number of nodes in the pool in progress. A
+ // pool enters this state when it is created and when no operations are being performed on the pool to
+ // change the number of nodes.
+ Steady AllocationState = "Steady"
+	// Stopping The pool was resizing, but the user has requested that the resize be stopped; the stop
+	// request has not yet been completed.
+ Stopping AllocationState = "Stopping"
+)
+
+// PossibleAllocationStateValues returns an array of possible values for the AllocationState const type.
+func PossibleAllocationStateValues() []AllocationState {
+ return []AllocationState{Resizing, Steady, Stopping}
+}
+
+// AutoUserScope enumerates the values for auto user scope.
+type AutoUserScope string
+
+const (
+ // AutoUserScopePool Specifies that the task runs as the common auto user account which is created on every
+ // node in a pool.
+ AutoUserScopePool AutoUserScope = "Pool"
+ // AutoUserScopeTask Specifies that the service should create a new user for the task.
+ AutoUserScopeTask AutoUserScope = "Task"
+)
+
+// PossibleAutoUserScopeValues returns an array of possible values for the AutoUserScope const type.
+func PossibleAutoUserScopeValues() []AutoUserScope {
+ return []AutoUserScope{AutoUserScopePool, AutoUserScopeTask}
+}
+
+// CachingType enumerates the values for caching type.
+type CachingType string
+
+const (
+ // None The caching mode for the disk is not enabled.
+ None CachingType = "None"
+ // ReadOnly The caching mode for the disk is read only.
+ ReadOnly CachingType = "ReadOnly"
+ // ReadWrite The caching mode for the disk is read and write.
+ ReadWrite CachingType = "ReadWrite"
+)
+
+// PossibleCachingTypeValues returns an array of possible values for the CachingType const type.
+func PossibleCachingTypeValues() []CachingType {
+ return []CachingType{None, ReadOnly, ReadWrite}
+}
+
+// CertificateFormat enumerates the values for certificate format.
+type CertificateFormat string
+
+const (
+ // Cer The certificate is a base64-encoded X.509 certificate.
+ Cer CertificateFormat = "Cer"
+ // Pfx The certificate is a PFX (PKCS#12) formatted certificate or certificate chain.
+ Pfx CertificateFormat = "Pfx"
+)
+
+// PossibleCertificateFormatValues returns an array of possible values for the CertificateFormat const type.
+func PossibleCertificateFormatValues() []CertificateFormat {
+ return []CertificateFormat{Cer, Pfx}
+}
+
+// CertificateProvisioningState enumerates the values for certificate provisioning state.
+type CertificateProvisioningState string
+
+const (
+ // Deleting The user has requested that the certificate be deleted, but the delete operation has not yet
+ // completed. You may not reference the certificate when creating or updating pools.
+ Deleting CertificateProvisioningState = "Deleting"
+ // Failed The user requested that the certificate be deleted, but there are pools that still have
+ // references to the certificate, or it is still installed on one or more compute nodes. (The latter can
+ // occur if the certificate has been removed from the pool, but the node has not yet restarted. Nodes
+ // refresh their certificates only when they restart.) You may use the cancel certificate delete operation
+ // to cancel the delete, or the delete certificate operation to retry the delete.
+ Failed CertificateProvisioningState = "Failed"
+ // Succeeded The certificate is available for use in pools.
+ Succeeded CertificateProvisioningState = "Succeeded"
+)
+
+// PossibleCertificateProvisioningStateValues returns an array of possible values for the CertificateProvisioningState const type.
+func PossibleCertificateProvisioningStateValues() []CertificateProvisioningState {
+ return []CertificateProvisioningState{Deleting, Failed, Succeeded}
+}
+
+// CertificateStoreLocation enumerates the values for certificate store location.
+type CertificateStoreLocation string
+
+const (
+ // CurrentUser Certificates should be installed to the CurrentUser certificate store.
+ CurrentUser CertificateStoreLocation = "CurrentUser"
+ // LocalMachine Certificates should be installed to the LocalMachine certificate store.
+ LocalMachine CertificateStoreLocation = "LocalMachine"
+)
+
+// PossibleCertificateStoreLocationValues returns an array of possible values for the CertificateStoreLocation const type.
+func PossibleCertificateStoreLocationValues() []CertificateStoreLocation {
+ return []CertificateStoreLocation{CurrentUser, LocalMachine}
+}
+
+// CertificateVisibility enumerates the values for certificate visibility.
+type CertificateVisibility string
+
+const (
+ // CertificateVisibilityRemoteUser The certificate should be visible to the user accounts under which users
+ // remotely access the node.
+ CertificateVisibilityRemoteUser CertificateVisibility = "RemoteUser"
+ // CertificateVisibilityStartTask The certificate should be visible to the user account under which the
+ // start task is run. Note that if AutoUser Scope is Pool for both the StartTask and a Task, this
+ // certificate will be visible to the Task as well.
+ CertificateVisibilityStartTask CertificateVisibility = "StartTask"
+ // CertificateVisibilityTask The certificate should be visible to the user accounts under which job tasks
+ // are run.
+ CertificateVisibilityTask CertificateVisibility = "Task"
+)
+
+// PossibleCertificateVisibilityValues returns an array of possible values for the CertificateVisibility const type.
+func PossibleCertificateVisibilityValues() []CertificateVisibility {
+ return []CertificateVisibility{CertificateVisibilityRemoteUser, CertificateVisibilityStartTask, CertificateVisibilityTask}
+}
+
+// ComputeNodeDeallocationOption enumerates the values for compute node deallocation option.
+type ComputeNodeDeallocationOption string
+
+const (
+ // Requeue Terminate running task processes and requeue the tasks. The tasks will run again when a node is
+ // available. Remove nodes as soon as tasks have been terminated.
+ Requeue ComputeNodeDeallocationOption = "Requeue"
+ // RetainedData Allow currently running tasks to complete, then wait for all task data retention periods to
+ // expire. Schedule no new tasks while waiting. Remove nodes when all task retention periods have expired.
+ RetainedData ComputeNodeDeallocationOption = "RetainedData"
+ // TaskCompletion Allow currently running tasks to complete. Schedule no new tasks while waiting. Remove
+ // nodes when all tasks have completed.
+ TaskCompletion ComputeNodeDeallocationOption = "TaskCompletion"
+ // Terminate Terminate running tasks. The tasks will be completed with failureInfo indicating that they
+ // were terminated, and will not run again. Remove nodes as soon as tasks have been terminated.
+ Terminate ComputeNodeDeallocationOption = "Terminate"
+)
+
+// PossibleComputeNodeDeallocationOptionValues returns an array of possible values for the ComputeNodeDeallocationOption const type.
+func PossibleComputeNodeDeallocationOptionValues() []ComputeNodeDeallocationOption {
+ return []ComputeNodeDeallocationOption{Requeue, RetainedData, TaskCompletion, Terminate}
+}
+
+// ComputeNodeFillType enumerates the values for compute node fill type.
+type ComputeNodeFillType string
+
+const (
+ // Pack As many tasks as possible (maxTasksPerNode) should be assigned to each node in the pool before any
+ // tasks are assigned to the next node in the pool.
+ Pack ComputeNodeFillType = "Pack"
+ // Spread Tasks should be assigned evenly across all nodes in the pool.
+ Spread ComputeNodeFillType = "Spread"
+)
+
+// PossibleComputeNodeFillTypeValues returns an array of possible values for the ComputeNodeFillType const type.
+func PossibleComputeNodeFillTypeValues() []ComputeNodeFillType {
+ return []ComputeNodeFillType{Pack, Spread}
+}
+
+// ContainerWorkingDirectory enumerates the values for container working directory.
+type ContainerWorkingDirectory string
+
+const (
+	// ContainerImageDefault Use the working directory defined in the container image. Beware that this
+	// directory will not contain the resource files downloaded by Batch.
+ ContainerImageDefault ContainerWorkingDirectory = "ContainerImageDefault"
+ // TaskWorkingDirectory Use the standard Batch service task working directory, which will contain the Task
+ // resource files populated by Batch.
+ TaskWorkingDirectory ContainerWorkingDirectory = "TaskWorkingDirectory"
+)
+
+// PossibleContainerWorkingDirectoryValues returns an array of possible values for the ContainerWorkingDirectory const type.
+func PossibleContainerWorkingDirectoryValues() []ContainerWorkingDirectory {
+ return []ContainerWorkingDirectory{ContainerImageDefault, TaskWorkingDirectory}
+}
+
+// ElevationLevel enumerates the values for elevation level.
+type ElevationLevel string
+
+const (
+ // Admin The user is a user with elevated access and operates with full Administrator permissions.
+ Admin ElevationLevel = "Admin"
+ // NonAdmin The user is a standard user without elevated access.
+ NonAdmin ElevationLevel = "NonAdmin"
+)
+
+// PossibleElevationLevelValues returns an array of possible values for the ElevationLevel const type.
+func PossibleElevationLevelValues() []ElevationLevel {
+ return []ElevationLevel{Admin, NonAdmin}
+}
+
+// InboundEndpointProtocol enumerates the values for inbound endpoint protocol.
+type InboundEndpointProtocol string
+
+const (
+ // TCP Use TCP for the endpoint.
+ TCP InboundEndpointProtocol = "TCP"
+ // UDP Use UDP for the endpoint.
+ UDP InboundEndpointProtocol = "UDP"
+)
+
+// PossibleInboundEndpointProtocolValues returns an array of possible values for the InboundEndpointProtocol const type.
+func PossibleInboundEndpointProtocolValues() []InboundEndpointProtocol {
+ return []InboundEndpointProtocol{TCP, UDP}
+}
+
+// InterNodeCommunicationState enumerates the values for inter node communication state.
+type InterNodeCommunicationState string
+
+const (
+ // Disabled Disable network communication between virtual machines.
+ Disabled InterNodeCommunicationState = "Disabled"
+ // Enabled Enable network communication between virtual machines.
+ Enabled InterNodeCommunicationState = "Enabled"
+)
+
+// PossibleInterNodeCommunicationStateValues returns an array of possible values for the InterNodeCommunicationState const type.
+func PossibleInterNodeCommunicationStateValues() []InterNodeCommunicationState {
+ return []InterNodeCommunicationState{Disabled, Enabled}
+}
+
+// LoginMode enumerates the values for login mode.
+type LoginMode string
+
+const (
+ // Batch The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running
+ // parallel processes.
+ Batch LoginMode = "Batch"
+ // Interactive The LOGON32_LOGON_INTERACTIVE Win32 login mode. Some applications require having permissions
+ // associated with the interactive login mode. If this is the case for an application used in your task,
+ // then this option is recommended.
+ Interactive LoginMode = "Interactive"
+)
+
+// PossibleLoginModeValues returns an array of possible values for the LoginMode const type.
+func PossibleLoginModeValues() []LoginMode {
+ return []LoginMode{Batch, Interactive}
+}
+
+// NameAvailabilityReason enumerates the values for name availability reason.
+type NameAvailabilityReason string
+
+const (
+ // AlreadyExists The requested name is already in use.
+ AlreadyExists NameAvailabilityReason = "AlreadyExists"
+ // Invalid The requested name is invalid.
+ Invalid NameAvailabilityReason = "Invalid"
+)
+
+// PossibleNameAvailabilityReasonValues returns an array of possible values for the NameAvailabilityReason const type.
+func PossibleNameAvailabilityReasonValues() []NameAvailabilityReason {
+ return []NameAvailabilityReason{AlreadyExists, Invalid}
+}
+
+// NetworkSecurityGroupRuleAccess enumerates the values for network security group rule access.
+type NetworkSecurityGroupRuleAccess string
+
+const (
+ // Allow Allow access.
+ Allow NetworkSecurityGroupRuleAccess = "Allow"
+ // Deny Deny access.
+ Deny NetworkSecurityGroupRuleAccess = "Deny"
+)
+
+// PossibleNetworkSecurityGroupRuleAccessValues returns an array of possible values for the NetworkSecurityGroupRuleAccess const type.
+func PossibleNetworkSecurityGroupRuleAccessValues() []NetworkSecurityGroupRuleAccess {
+ return []NetworkSecurityGroupRuleAccess{Allow, Deny}
+}
+
+// PackageState enumerates the values for package state.
+type PackageState string
+
+const (
+ // Active The application package is ready for use.
+ Active PackageState = "Active"
+ // Pending The application package has been created but has not yet been activated.
+ Pending PackageState = "Pending"
+)
+
+// PossiblePackageStateValues returns an array of possible values for the PackageState const type.
+func PossiblePackageStateValues() []PackageState {
+ return []PackageState{Active, Pending}
+}
+
+// PoolAllocationMode enumerates the values for pool allocation mode.
+type PoolAllocationMode string
+
+const (
+ // BatchService Pools will be allocated in subscriptions owned by the Batch service.
+ BatchService PoolAllocationMode = "BatchService"
+ // UserSubscription Pools will be allocated in a subscription owned by the user.
+ UserSubscription PoolAllocationMode = "UserSubscription"
+)
+
+// PossiblePoolAllocationModeValues returns an array of possible values for the PoolAllocationMode const type.
+func PossiblePoolAllocationModeValues() []PoolAllocationMode {
+ return []PoolAllocationMode{BatchService, UserSubscription}
+}
+
+// PoolProvisioningState enumerates the values for pool provisioning state.
+type PoolProvisioningState string
+
+const (
+ // PoolProvisioningStateDeleting The user has requested that the pool be deleted, but the delete operation
+ // has not yet completed.
+ PoolProvisioningStateDeleting PoolProvisioningState = "Deleting"
+ // PoolProvisioningStateSucceeded The pool is available to run tasks subject to the availability of compute
+ // nodes.
+ PoolProvisioningStateSucceeded PoolProvisioningState = "Succeeded"
+)
+
+// PossiblePoolProvisioningStateValues returns an array of possible values for the PoolProvisioningState const type.
+func PossiblePoolProvisioningStateValues() []PoolProvisioningState {
+ return []PoolProvisioningState{PoolProvisioningStateDeleting, PoolProvisioningStateSucceeded}
+}
+
+// ProvisioningState enumerates the values for provisioning state.
+type ProvisioningState string
+
+const (
+	// ProvisioningStateCancelled The last operation for the account was cancelled.
+ ProvisioningStateCancelled ProvisioningState = "Cancelled"
+ // ProvisioningStateCreating The account is being created.
+ ProvisioningStateCreating ProvisioningState = "Creating"
+ // ProvisioningStateDeleting The account is being deleted.
+ ProvisioningStateDeleting ProvisioningState = "Deleting"
+	// ProvisioningStateFailed The last operation for the account failed.
+ ProvisioningStateFailed ProvisioningState = "Failed"
+ // ProvisioningStateInvalid The account is in an invalid state.
+ ProvisioningStateInvalid ProvisioningState = "Invalid"
+ // ProvisioningStateSucceeded The account has been created and is ready for use.
+ ProvisioningStateSucceeded ProvisioningState = "Succeeded"
+)
+
+// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type.
+func PossibleProvisioningStateValues() []ProvisioningState {
+ return []ProvisioningState{ProvisioningStateCancelled, ProvisioningStateCreating, ProvisioningStateDeleting, ProvisioningStateFailed, ProvisioningStateInvalid, ProvisioningStateSucceeded}
+}
+
+// StorageAccountType enumerates the values for storage account type.
+type StorageAccountType string
+
+const (
+ // PremiumLRS The data disk should use premium locally redundant storage.
+ PremiumLRS StorageAccountType = "Premium_LRS"
+ // StandardLRS The data disk should use standard locally redundant storage.
+ StandardLRS StorageAccountType = "Standard_LRS"
+)
+
+// PossibleStorageAccountTypeValues returns an array of possible values for the StorageAccountType const type.
+func PossibleStorageAccountTypeValues() []StorageAccountType {
+ return []StorageAccountType{PremiumLRS, StandardLRS}
+}
+
+// Account contains information about an Azure Batch account.
+type Account struct {
+ autorest.Response `json:"-"`
+ // AccountProperties - The properties associated with the account.
+ *AccountProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+ // Location - READ-ONLY; The location of the resource.
+ Location *string `json:"location,omitempty"`
+ // Tags - READ-ONLY; The tags of the resource.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for Account.
+func (a Account) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if a.AccountProperties != nil {
+ objectMap["properties"] = a.AccountProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Account struct.
+func (a *Account) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var accountProperties AccountProperties
+ err = json.Unmarshal(*v, &accountProperties)
+ if err != nil {
+ return err
+ }
+ a.AccountProperties = &accountProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ a.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ a.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ a.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ a.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ a.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// AccountCreateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type AccountCreateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *AccountCreateFuture) Result(client AccountClient) (a Account, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountCreateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("batch.AccountCreateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if a.Response.Response, err = future.GetResult(sender); err == nil && a.Response.Response.StatusCode != http.StatusNoContent {
+ a, err = client.CreateResponder(a.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountCreateFuture", "Result", a.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
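+
+// Editor's illustrative sketch (not generated code): the usual way to consume
+// this future is to block with WaitForCompletionRef (promoted from the embedded
+// azure.Future) and then call Result for the final Account. The AccountClient is
+// assumed to already have an Authorizer configured. Assumed import: context.
+func waitForAccountCreate(ctx context.Context, client AccountClient, future AccountCreateFuture) (Account, error) {
+	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+		return Account{}, err
+	}
+	return future.Result(client)
+}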
+
+// AccountCreateParameters parameters supplied to the Create operation.
+type AccountCreateParameters struct {
+ // Location - The region in which to create the account.
+ Location *string `json:"location,omitempty"`
+ // Tags - The user-specified tags associated with the account.
+ Tags map[string]*string `json:"tags"`
+ // AccountCreateProperties - The properties of the Batch account.
+ *AccountCreateProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountCreateParameters.
+func (acp AccountCreateParameters) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if acp.Location != nil {
+ objectMap["location"] = acp.Location
+ }
+ if acp.Tags != nil {
+ objectMap["tags"] = acp.Tags
+ }
+ if acp.AccountCreateProperties != nil {
+ objectMap["properties"] = acp.AccountCreateProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AccountCreateParameters struct.
+func (acp *AccountCreateParameters) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ acp.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ acp.Tags = tags
+ }
+ case "properties":
+ if v != nil {
+ var accountCreateProperties AccountCreateProperties
+ err = json.Unmarshal(*v, &accountCreateProperties)
+ if err != nil {
+ return err
+ }
+ acp.AccountCreateProperties = &accountCreateProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// AccountCreateProperties the properties of a Batch account.
+type AccountCreateProperties struct {
+ // AutoStorage - The properties related to the auto-storage account.
+ AutoStorage *AutoStorageBaseProperties `json:"autoStorage,omitempty"`
+ // PoolAllocationMode - The pool allocation mode also affects how clients may authenticate to the Batch Service API. If the mode is BatchService, clients may authenticate using access keys or Azure Active Directory. If the mode is UserSubscription, clients must use Azure Active Directory. The default is BatchService. Possible values include: 'BatchService', 'UserSubscription'
+ PoolAllocationMode PoolAllocationMode `json:"poolAllocationMode,omitempty"`
+ // KeyVaultReference - A reference to the Azure key vault associated with the Batch account.
+ KeyVaultReference *KeyVaultReference `json:"keyVaultReference,omitempty"`
+}
+
+// AccountDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type AccountDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *AccountDeleteFuture) Result(client AccountClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.AccountDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("batch.AccountDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// AccountKeys a set of Azure Batch account keys.
+type AccountKeys struct {
+ autorest.Response `json:"-"`
+ // AccountName - READ-ONLY; The Batch account name.
+ AccountName *string `json:"accountName,omitempty"`
+ // Primary - READ-ONLY; The primary key associated with the account.
+ Primary *string `json:"primary,omitempty"`
+ // Secondary - READ-ONLY; The secondary key associated with the account.
+ Secondary *string `json:"secondary,omitempty"`
+}
+
+// AccountListResult values returned by the List operation.
+type AccountListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The collection of Batch accounts returned by the listing operation.
+ Value *[]Account `json:"value,omitempty"`
+ // NextLink - The continuation token.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// AccountListResultIterator provides access to a complete listing of Account values.
+type AccountListResultIterator struct {
+ i int
+ page AccountListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *AccountListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *AccountListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter AccountListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter AccountListResultIterator) Response() AccountListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter AccountListResultIterator) Value() Account {
+ if !iter.page.NotDone() {
+ return Account{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the AccountListResultIterator type.
+func NewAccountListResultIterator(page AccountListResultPage) AccountListResultIterator {
+ return AccountListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (alr AccountListResult) IsEmpty() bool {
+ return alr.Value == nil || len(*alr.Value) == 0
+}
+
+// accountListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (alr AccountListResult) accountListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if alr.NextLink == nil || len(to.String(alr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(alr.NextLink)))
+}
+
+// AccountListResultPage contains a page of Account values.
+type AccountListResultPage struct {
+ fn func(context.Context, AccountListResult) (AccountListResult, error)
+ alr AccountListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *AccountListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.alr)
+ if err != nil {
+ return err
+ }
+ page.alr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AccountListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page AccountListResultPage) NotDone() bool {
+ return !page.alr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page AccountListResultPage) Response() AccountListResult {
+ return page.alr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page AccountListResultPage) Values() []Account {
+ if page.alr.IsEmpty() {
+ return nil
+ }
+ return *page.alr.Value
+}
+
+// Creates a new instance of the AccountListResultPage type.
+func NewAccountListResultPage(getNextPage func(context.Context, AccountListResult) (AccountListResult, error)) AccountListResultPage {
+ return AccountListResultPage{fn: getNextPage}
+}
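+
+// Illustrative sketch (editor's example, not generated code): draining the pager defined
+// above. AccountClient.List returning an AccountListResultPage is assumed from the usual
+// generated client surface; the same NotDone/NextWithContext/Values loop applies to the
+// other ListResult pagers in this package.
+func exampleListAllAccounts(ctx context.Context, client AccountClient) ([]Account, error) {
+	page, err := client.List(ctx) // assumed signature
+	if err != nil {
+		return nil, err
+	}
+	var accounts []Account
+	for page.NotDone() {
+		accounts = append(accounts, page.Values()...)
+		if err := page.NextWithContext(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return accounts, nil
+}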
+
+// AccountProperties account specific properties.
+type AccountProperties struct {
+ // AccountEndpoint - READ-ONLY; The account endpoint used to interact with the Batch service.
+ AccountEndpoint *string `json:"accountEndpoint,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioned state of the resource. Possible values include: 'ProvisioningStateInvalid', 'ProvisioningStateCreating', 'ProvisioningStateDeleting', 'ProvisioningStateSucceeded', 'ProvisioningStateFailed', 'ProvisioningStateCancelled'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // PoolAllocationMode - READ-ONLY; Possible values include: 'BatchService', 'UserSubscription'
+ PoolAllocationMode PoolAllocationMode `json:"poolAllocationMode,omitempty"`
+ // KeyVaultReference - READ-ONLY
+ KeyVaultReference *KeyVaultReference `json:"keyVaultReference,omitempty"`
+ // AutoStorage - READ-ONLY
+ AutoStorage *AutoStorageProperties `json:"autoStorage,omitempty"`
+ // DedicatedCoreQuota - READ-ONLY; For accounts with PoolAllocationMode set to UserSubscription, quota is managed on the subscription so this value is not returned.
+ DedicatedCoreQuota *int32 `json:"dedicatedCoreQuota,omitempty"`
+ // LowPriorityCoreQuota - READ-ONLY; For accounts with PoolAllocationMode set to UserSubscription, quota is managed on the subscription so this value is not returned.
+ LowPriorityCoreQuota *int32 `json:"lowPriorityCoreQuota,omitempty"`
+ // DedicatedCoreQuotaPerVMFamily - READ-ONLY; A list of the dedicated core quota per Virtual Machine family for the Batch account. For accounts with PoolAllocationMode set to UserSubscription, quota is managed on the subscription so this value is not returned.
+ DedicatedCoreQuotaPerVMFamily *[]VirtualMachineFamilyCoreQuota `json:"dedicatedCoreQuotaPerVMFamily,omitempty"`
+ // DedicatedCoreQuotaPerVMFamilyEnforced - READ-ONLY; Batch is transitioning its core quota system for dedicated cores to be enforced per Virtual Machine family. During this transitional phase, the dedicated core quota per Virtual Machine family may not yet be enforced. If this flag is false, dedicated core quota is enforced via the old dedicatedCoreQuota property on the account and does not consider Virtual Machine family. If this flag is true, dedicated core quota is enforced via the dedicatedCoreQuotaPerVMFamily property on the account, and the old dedicatedCoreQuota does not apply.
+ DedicatedCoreQuotaPerVMFamilyEnforced *bool `json:"dedicatedCoreQuotaPerVMFamilyEnforced,omitempty"`
+ // PoolQuota - READ-ONLY
+ PoolQuota *int32 `json:"poolQuota,omitempty"`
+ // ActiveJobAndJobScheduleQuota - READ-ONLY
+ ActiveJobAndJobScheduleQuota *int32 `json:"activeJobAndJobScheduleQuota,omitempty"`
+}
+
+// AccountRegenerateKeyParameters parameters supplied to the RegenerateKey operation.
+type AccountRegenerateKeyParameters struct {
+ // KeyName - The type of account key to regenerate. Possible values include: 'Primary', 'Secondary'
+ KeyName AccountKeyType `json:"keyName,omitempty"`
+}
+
+// AccountUpdateParameters parameters for updating an Azure Batch account.
+type AccountUpdateParameters struct {
+ // Tags - The user-specified tags associated with the account.
+ Tags map[string]*string `json:"tags"`
+ // AccountUpdateProperties - The properties of the account.
+ *AccountUpdateProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountUpdateParameters.
+func (aup AccountUpdateParameters) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if aup.Tags != nil {
+ objectMap["tags"] = aup.Tags
+ }
+ if aup.AccountUpdateProperties != nil {
+ objectMap["properties"] = aup.AccountUpdateProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AccountUpdateParameters struct.
+func (aup *AccountUpdateParameters) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ aup.Tags = tags
+ }
+ case "properties":
+ if v != nil {
+ var accountUpdateProperties AccountUpdateProperties
+ err = json.Unmarshal(*v, &accountUpdateProperties)
+ if err != nil {
+ return err
+ }
+ aup.AccountUpdateProperties = &accountUpdateProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// AccountUpdateProperties the properties of a Batch account.
+type AccountUpdateProperties struct {
+ // AutoStorage - The properties related to the auto-storage account.
+ AutoStorage *AutoStorageBaseProperties `json:"autoStorage,omitempty"`
+}
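+
+// Illustrative sketch (editor's example, not generated code): building an update payload.
+// The custom MarshalJSON above re-nests the embedded AccountUpdateProperties under the
+// "properties" key on the wire. The tag and storage account ID values are placeholders.
+func exampleAccountUpdateParameters() ([]byte, error) {
+	env := "dev"
+	storageAccountID := "/subscriptions/.../providers/Microsoft.Storage/storageAccounts/mystorage" // placeholder
+	params := AccountUpdateParameters{
+		Tags: map[string]*string{"env": &env},
+		AccountUpdateProperties: &AccountUpdateProperties{
+			AutoStorage: &AutoStorageBaseProperties{StorageAccountID: &storageAccountID},
+		},
+	}
+	// Produces {"tags":{"env":"dev"},"properties":{"autoStorage":{"storageAccountId":"..."}}}.
+	return json.Marshal(params)
+}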
+
+// ActivateApplicationPackageParameters parameters for activating an application package.
+type ActivateApplicationPackageParameters struct {
+ // Format - The format of the application package binary file.
+ Format *string `json:"format,omitempty"`
+}
+
+// Application contains information about an application in a Batch account.
+type Application struct {
+ autorest.Response `json:"-"`
+ // ApplicationProperties - The properties associated with the Application.
+ *ApplicationProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+ // Etag - READ-ONLY; The ETag of the resource, used for concurrency statements.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Application.
+func (a Application) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if a.ApplicationProperties != nil {
+ objectMap["properties"] = a.ApplicationProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Application struct.
+func (a *Application) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var applicationProperties ApplicationProperties
+ err = json.Unmarshal(*v, &applicationProperties)
+ if err != nil {
+ return err
+ }
+ a.ApplicationProperties = &applicationProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ a.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ a.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ a.Type = &typeVar
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ a.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
+
+// ApplicationPackage an application package which represents a particular version of an application.
+type ApplicationPackage struct {
+ autorest.Response `json:"-"`
+ // ApplicationPackageProperties - The properties associated with the Application Package.
+ *ApplicationPackageProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+ // Etag - READ-ONLY; The ETag of the resource, used for concurrency statements.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ApplicationPackage.
+func (ap ApplicationPackage) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ap.ApplicationPackageProperties != nil {
+ objectMap["properties"] = ap.ApplicationPackageProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ApplicationPackage struct.
+func (ap *ApplicationPackage) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var applicationPackageProperties ApplicationPackageProperties
+ err = json.Unmarshal(*v, &applicationPackageProperties)
+ if err != nil {
+ return err
+ }
+ ap.ApplicationPackageProperties = &applicationPackageProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ ap.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ap.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ap.Type = &typeVar
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ ap.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
+
+// ApplicationPackageProperties properties of an application package
+type ApplicationPackageProperties struct {
+ // State - READ-ONLY; The current state of the application package. Possible values include: 'Pending', 'Active'
+ State PackageState `json:"state,omitempty"`
+ // Format - READ-ONLY; The format of the application package, if the package is active.
+ Format *string `json:"format,omitempty"`
+ // StorageURL - READ-ONLY; The URL for the application package in Azure Storage.
+ StorageURL *string `json:"storageUrl,omitempty"`
+ // StorageURLExpiry - READ-ONLY; The UTC time at which the Azure Storage URL will expire.
+ StorageURLExpiry *date.Time `json:"storageUrlExpiry,omitempty"`
+ // LastActivationTime - READ-ONLY; The time at which the package was last activated, if the package is active.
+ LastActivationTime *date.Time `json:"lastActivationTime,omitempty"`
+}
+
+// ApplicationPackageReference ...
+type ApplicationPackageReference struct {
+ ID *string `json:"id,omitempty"`
+ // Version - If this is omitted, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences. If you are calling the REST API directly, the HTTP status code is 409.
+ Version *string `json:"version,omitempty"`
+}
+
+// ApplicationProperties the properties associated with the Application.
+type ApplicationProperties struct {
+ // DisplayName - The display name for the application.
+ DisplayName *string `json:"displayName,omitempty"`
+ // AllowUpdates - A value indicating whether packages within the application may be overwritten using the same version string.
+ AllowUpdates *bool `json:"allowUpdates,omitempty"`
+ // DefaultVersion - The package to use if a client requests the application but does not specify a version. This property can only be set to the name of an existing package.
+ DefaultVersion *string `json:"defaultVersion,omitempty"`
+}
+
+// AutoScaleRun ...
+type AutoScaleRun struct {
+ EvaluationTime *date.Time `json:"evaluationTime,omitempty"`
+ // Results - Each variable value is returned in the form $variable=value, and variables are separated by semicolons.
+ Results *string `json:"results,omitempty"`
+ Error *AutoScaleRunError `json:"error,omitempty"`
+}
+
+// AutoScaleRunError ...
+type AutoScaleRunError struct {
+ // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+ Code *string `json:"code,omitempty"`
+ // Message - A message describing the error, intended to be suitable for display in a user interface.
+ Message *string `json:"message,omitempty"`
+ Details *[]AutoScaleRunError `json:"details,omitempty"`
+}
+
+// AutoScaleSettings ...
+type AutoScaleSettings struct {
+ Formula *string `json:"formula,omitempty"`
+ // EvaluationInterval - If omitted, the default value is 15 minutes (PT15M).
+ EvaluationInterval *string `json:"evaluationInterval,omitempty"`
+}
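+
+// Illustrative sketch (editor's example, not generated code): an autoscaled pool evaluated
+// every five minutes (ISO 8601 duration PT5M). The formula text is only an example of the
+// Batch autoscale formula language and is not validated here.
+func exampleAutoScaleSettings() *AutoScaleSettings {
+	formula := "$TargetDedicatedNodes = min(10, $PendingTasks.GetSample(1 * TimeInterval_Minute));"
+	interval := "PT5M"
+	return &AutoScaleSettings{
+		Formula:            &formula,
+		EvaluationInterval: &interval,
+	}
+}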
+
+// AutoStorageBaseProperties the properties related to the auto-storage account.
+type AutoStorageBaseProperties struct {
+	// StorageAccountID - The resource ID of the storage account to be used for the auto-storage account.
+ StorageAccountID *string `json:"storageAccountId,omitempty"`
+}
+
+// AutoStorageProperties contains information about the auto-storage account associated with a Batch
+// account.
+type AutoStorageProperties struct {
+ // LastKeySync - The UTC time at which storage keys were last synchronized with the Batch account.
+ LastKeySync *date.Time `json:"lastKeySync,omitempty"`
+	// StorageAccountID - The resource ID of the storage account to be used for the auto-storage account.
+ StorageAccountID *string `json:"storageAccountId,omitempty"`
+}
+
+// AutoUserSpecification ...
+type AutoUserSpecification struct {
+	// Scope - The default value is Pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, for example if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by start tasks. Possible values include: 'AutoUserScopeTask', 'AutoUserScopePool'
+ Scope AutoUserScope `json:"scope,omitempty"`
+ // ElevationLevel - The default value is nonAdmin. Possible values include: 'NonAdmin', 'Admin'
+ ElevationLevel ElevationLevel `json:"elevationLevel,omitempty"`
+}
+
+// AzureBlobFileSystemConfiguration ...
+type AzureBlobFileSystemConfiguration struct {
+ AccountName *string `json:"accountName,omitempty"`
+ ContainerName *string `json:"containerName,omitempty"`
+ // AccountKey - This property is mutually exclusive with sasKey and one must be specified.
+ AccountKey *string `json:"accountKey,omitempty"`
+ // SasKey - This property is mutually exclusive with accountKey and one must be specified.
+ SasKey *string `json:"sasKey,omitempty"`
+ // BlobfuseOptions - These are 'net use' options in Windows and 'mount' options in Linux.
+ BlobfuseOptions *string `json:"blobfuseOptions,omitempty"`
+ // RelativeMountPath - All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
+ RelativeMountPath *string `json:"relativeMountPath,omitempty"`
+}
+
+// AzureFileShareConfiguration ...
+type AzureFileShareConfiguration struct {
+ AccountName *string `json:"accountName,omitempty"`
+ // AzureFileURL - This is of the form 'https://{account}.file.core.windows.net/'.
+ AzureFileURL *string `json:"azureFileUrl,omitempty"`
+ AccountKey *string `json:"accountKey,omitempty"`
+ // RelativeMountPath - All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
+ RelativeMountPath *string `json:"relativeMountPath,omitempty"`
+ // MountOptions - These are 'net use' options in Windows and 'mount' options in Linux.
+ MountOptions *string `json:"mountOptions,omitempty"`
+}
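+
+// Illustrative sketch (editor's example, not generated code): an Azure Files mount using the
+// 'https://{account}.file.core.windows.net/' URL form noted above. All literal values are
+// placeholders; the share is exposed under the Batch mounts directory at the given relative path.
+func exampleAzureFileShareConfiguration() *AzureFileShareConfiguration {
+	accountName := "mystorageaccount"
+	fileURL := "https://mystorageaccount.file.core.windows.net/myshare"
+	accountKey := "<storage-account-key>" // placeholder
+	mountPath := "data"
+	return &AzureFileShareConfiguration{
+		AccountName:       &accountName,
+		AzureFileURL:      &fileURL,
+		AccountKey:        &accountKey,
+		RelativeMountPath: &mountPath,
+	}
+}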
+
+// Certificate contains information about a certificate.
+type Certificate struct {
+ autorest.Response `json:"-"`
+ // CertificateProperties - The properties associated with the certificate.
+ *CertificateProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+ // Etag - READ-ONLY; The ETag of the resource, used for concurrency statements.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Certificate.
+func (c Certificate) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if c.CertificateProperties != nil {
+ objectMap["properties"] = c.CertificateProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Certificate struct.
+func (c *Certificate) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var certificateProperties CertificateProperties
+ err = json.Unmarshal(*v, &certificateProperties)
+ if err != nil {
+ return err
+ }
+ c.CertificateProperties = &certificateProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ c.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ c.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ c.Type = &typeVar
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ c.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
+
+// CertificateBaseProperties ...
+type CertificateBaseProperties struct {
+ // ThumbprintAlgorithm - This must match the first portion of the certificate name. Currently required to be 'SHA1'.
+ ThumbprintAlgorithm *string `json:"thumbprintAlgorithm,omitempty"`
+ // Thumbprint - This must match the thumbprint from the name.
+ Thumbprint *string `json:"thumbprint,omitempty"`
+ // Format - The format of the certificate - either Pfx or Cer. If omitted, the default is Pfx. Possible values include: 'Pfx', 'Cer'
+ Format CertificateFormat `json:"format,omitempty"`
+}
+
+// CertificateCreateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type CertificateCreateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *CertificateCreateFuture) Result(client CertificateClient) (c Certificate, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateCreateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("batch.CertificateCreateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if c.Response.Response, err = future.GetResult(sender); err == nil && c.Response.Response.StatusCode != http.StatusNoContent {
+ c, err = client.CreateResponder(c.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateCreateFuture", "Result", c.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// CertificateCreateOrUpdateParameters contains information about a certificate.
+type CertificateCreateOrUpdateParameters struct {
+ // CertificateCreateOrUpdateProperties - The properties associated with the certificate.
+ *CertificateCreateOrUpdateProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+ // Etag - READ-ONLY; The ETag of the resource, used for concurrency statements.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for CertificateCreateOrUpdateParameters.
+func (ccoup CertificateCreateOrUpdateParameters) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ccoup.CertificateCreateOrUpdateProperties != nil {
+ objectMap["properties"] = ccoup.CertificateCreateOrUpdateProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for CertificateCreateOrUpdateParameters struct.
+func (ccoup *CertificateCreateOrUpdateParameters) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var certificateCreateOrUpdateProperties CertificateCreateOrUpdateProperties
+ err = json.Unmarshal(*v, &certificateCreateOrUpdateProperties)
+ if err != nil {
+ return err
+ }
+ ccoup.CertificateCreateOrUpdateProperties = &certificateCreateOrUpdateProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ ccoup.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ccoup.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ccoup.Type = &typeVar
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ ccoup.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
+
+// CertificateCreateOrUpdateProperties certificate properties for create operations
+type CertificateCreateOrUpdateProperties struct {
+ // Data - The maximum size is 10KB.
+ Data *string `json:"data,omitempty"`
+ // Password - This is required if the certificate format is pfx and must be omitted if the certificate format is cer.
+ Password *string `json:"password,omitempty"`
+ // ThumbprintAlgorithm - This must match the first portion of the certificate name. Currently required to be 'SHA1'.
+ ThumbprintAlgorithm *string `json:"thumbprintAlgorithm,omitempty"`
+ // Thumbprint - This must match the thumbprint from the name.
+ Thumbprint *string `json:"thumbprint,omitempty"`
+ // Format - The format of the certificate - either Pfx or Cer. If omitted, the default is Pfx. Possible values include: 'Pfx', 'Cer'
+ Format CertificateFormat `json:"format,omitempty"`
+}
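+
+// Illustrative sketch (editor's example, not generated code): filling the create/update
+// properties for a PFX certificate. pfxBase64 must be the base64-encoded contents of the
+// .pfx file (10KB maximum); the thumbprint shown is a placeholder and must match the
+// thumbprint portion of the certificate name.
+func exampleCertificateCreateOrUpdateProperties(pfxBase64, password string) *CertificateCreateOrUpdateProperties {
+	thumbprintAlgorithm := "SHA1"                             // currently the only supported value
+	thumbprint := "0123456789abcdef0123456789abcdef01234567" // placeholder
+	return &CertificateCreateOrUpdateProperties{
+		Data:                &pfxBase64,
+		Password:            &password,
+		ThumbprintAlgorithm: &thumbprintAlgorithm,
+		Thumbprint:          &thumbprint,
+		Format:              Pfx,
+	}
+}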
+
+// CertificateDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type CertificateDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *CertificateDeleteFuture) Result(client CertificateClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.CertificateDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("batch.CertificateDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// CertificateProperties certificate properties.
+type CertificateProperties struct {
+ // ProvisioningState - READ-ONLY; Possible values include: 'Succeeded', 'Deleting', 'Failed'
+ ProvisioningState CertificateProvisioningState `json:"provisioningState,omitempty"`
+ // ProvisioningStateTransitionTime - READ-ONLY
+ ProvisioningStateTransitionTime *date.Time `json:"provisioningStateTransitionTime,omitempty"`
+ // PreviousProvisioningState - READ-ONLY; The previous provisioned state of the resource. Possible values include: 'Succeeded', 'Deleting', 'Failed'
+ PreviousProvisioningState CertificateProvisioningState `json:"previousProvisioningState,omitempty"`
+ // PreviousProvisioningStateTransitionTime - READ-ONLY
+ PreviousProvisioningStateTransitionTime *date.Time `json:"previousProvisioningStateTransitionTime,omitempty"`
+ // PublicData - READ-ONLY; The public key of the certificate.
+ PublicData *string `json:"publicData,omitempty"`
+ // DeleteCertificateError - READ-ONLY; This is only returned when the certificate provisioningState is 'Failed'.
+ DeleteCertificateError *DeleteCertificateError `json:"deleteCertificateError,omitempty"`
+ // ThumbprintAlgorithm - This must match the first portion of the certificate name. Currently required to be 'SHA1'.
+ ThumbprintAlgorithm *string `json:"thumbprintAlgorithm,omitempty"`
+ // Thumbprint - This must match the thumbprint from the name.
+ Thumbprint *string `json:"thumbprint,omitempty"`
+ // Format - The format of the certificate - either Pfx or Cer. If omitted, the default is Pfx. Possible values include: 'Pfx', 'Cer'
+ Format CertificateFormat `json:"format,omitempty"`
+}
+
+// CertificateReference ...
+type CertificateReference struct {
+ ID *string `json:"id,omitempty"`
+ // StoreLocation - The default value is currentUser. This property is applicable only for pools configured with Windows nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows image reference). For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory. Possible values include: 'CurrentUser', 'LocalMachine'
+ StoreLocation CertificateStoreLocation `json:"storeLocation,omitempty"`
+ // StoreName - This property is applicable only for pools configured with Windows nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My.
+ StoreName *string `json:"storeName,omitempty"`
+ Visibility *[]CertificateVisibility `json:"visibility,omitempty"`
+}
+
+// CheckNameAvailabilityParameters parameters for a check name availability request.
+type CheckNameAvailabilityParameters struct {
+ // Name - The name to check for availability
+ Name *string `json:"name,omitempty"`
+ // Type - The resource type. Must be set to Microsoft.Batch/batchAccounts
+ Type *string `json:"type,omitempty"`
+}
+
+// CheckNameAvailabilityResult the CheckNameAvailability operation response.
+type CheckNameAvailabilityResult struct {
+ autorest.Response `json:"-"`
+	// NameAvailable - READ-ONLY; Gets a boolean value that indicates whether the name is available for you to use. If true, the name is available. If false, the name has already been taken or is invalid and cannot be used.
+ NameAvailable *bool `json:"nameAvailable,omitempty"`
+ // Reason - READ-ONLY; Gets the reason that a Batch account name could not be used. The Reason element is only returned if NameAvailable is false. Possible values include: 'Invalid', 'AlreadyExists'
+ Reason NameAvailabilityReason `json:"reason,omitempty"`
+ // Message - READ-ONLY; Gets an error message explaining the Reason value in more detail.
+ Message *string `json:"message,omitempty"`
+}
+
+// CIFSMountConfiguration ...
+type CIFSMountConfiguration struct {
+ Username *string `json:"username,omitempty"`
+ Source *string `json:"source,omitempty"`
+ // RelativeMountPath - All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
+ RelativeMountPath *string `json:"relativeMountPath,omitempty"`
+ // MountOptions - These are 'net use' options in Windows and 'mount' options in Linux.
+ MountOptions *string `json:"mountOptions,omitempty"`
+ Password *string `json:"password,omitempty"`
+}
+
+// CloudError an error response from the Batch service.
+type CloudError struct {
+ Error *CloudErrorBody `json:"error,omitempty"`
+}
+
+// CloudErrorBody an error response from the Batch service.
+type CloudErrorBody struct {
+ // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+ Code *string `json:"code,omitempty"`
+ // Message - A message describing the error, intended to be suitable for display in a user interface.
+ Message *string `json:"message,omitempty"`
+ // Target - The target of the particular error. For example, the name of the property in error.
+ Target *string `json:"target,omitempty"`
+ // Details - A list of additional details about the error.
+ Details *[]CloudErrorBody `json:"details,omitempty"`
+}
+
+// CloudServiceConfiguration ...
+type CloudServiceConfiguration struct {
+ // OsFamily - Possible values are: 2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1. 3 - OS Family 3, equivalent to Windows Server 2012. 4 - OS Family 4, equivalent to Windows Server 2012 R2. 5 - OS Family 5, equivalent to Windows Server 2016. 6 - OS Family 6, equivalent to Windows Server 2019. For more information, see Azure Guest OS Releases (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases).
+ OsFamily *string `json:"osFamily,omitempty"`
+ // OsVersion - The default value is * which specifies the latest operating system version for the specified OS family.
+ OsVersion *string `json:"osVersion,omitempty"`
+}
+
+// ContainerConfiguration ...
+type ContainerConfiguration struct {
+ Type *string `json:"type,omitempty"`
+ // ContainerImageNames - This is the full image reference, as would be specified to "docker pull". An image will be sourced from the default Docker registry unless the image is fully qualified with an alternative registry.
+ ContainerImageNames *[]string `json:"containerImageNames,omitempty"`
+ // ContainerRegistries - If any images must be downloaded from a private registry which requires credentials, then those credentials must be provided here.
+ ContainerRegistries *[]ContainerRegistry `json:"containerRegistries,omitempty"`
+}
+
+// ContainerRegistry ...
+type ContainerRegistry struct {
+ // RegistryServer - If omitted, the default is "docker.io".
+ RegistryServer *string `json:"registryServer,omitempty"`
+ UserName *string `json:"username,omitempty"`
+ Password *string `json:"password,omitempty"`
+}
+
+// DataDisk settings that will be used by the data disks associated with the Compute Nodes in the Pool.
+// When using attached data disks, you need to mount and format the disks from within a VM to use them.
+type DataDisk struct {
+ // Lun - The lun is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct lun.
+ Lun *int32 `json:"lun,omitempty"`
+ // Caching - Values are:
+ // none - The caching mode for the disk is not enabled.
+ // readOnly - The caching mode for the disk is read only.
+ // readWrite - The caching mode for the disk is read and write.
+ // The default value for caching is none. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. Possible values include: 'None', 'ReadOnly', 'ReadWrite'
+ Caching CachingType `json:"caching,omitempty"`
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+ // StorageAccountType - If omitted, the default is "Standard_LRS". Values are:
+ // Standard_LRS - The data disk should use standard locally redundant storage.
+ // Premium_LRS - The data disk should use premium locally redundant storage. Possible values include: 'StandardLRS', 'PremiumLRS'
+ StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
+}
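+
+// Illustrative sketch (editor's example, not generated code): a single 128 GB data disk at
+// LUN 0 with read-only caching on standard locally redundant storage. The size and values
+// are placeholders; as noted above, the disk still has to be mounted and formatted from
+// within the VM before use.
+func exampleDataDisk() DataDisk {
+	lun := int32(0)
+	sizeGB := int32(128)
+	return DataDisk{
+		Lun:                &lun,
+		Caching:            ReadOnly,
+		DiskSizeGB:         &sizeGB,
+		StorageAccountType: StandardLRS,
+	}
+}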
+
+// DeleteCertificateError an error response from the Batch service.
+type DeleteCertificateError struct {
+ // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+ Code *string `json:"code,omitempty"`
+ // Message - A message describing the error, intended to be suitable for display in a user interface.
+ Message *string `json:"message,omitempty"`
+ // Target - The target of the particular error. For example, the name of the property in error.
+ Target *string `json:"target,omitempty"`
+ // Details - A list of additional details about the error.
+ Details *[]DeleteCertificateError `json:"details,omitempty"`
+}
+
+// DeploymentConfiguration ...
+type DeploymentConfiguration struct {
+ // CloudServiceConfiguration - This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch account was created with its poolAllocationMode property set to 'UserSubscription'.
+ CloudServiceConfiguration *CloudServiceConfiguration `json:"cloudServiceConfiguration,omitempty"`
+ // VirtualMachineConfiguration - This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified.
+ VirtualMachineConfiguration *VirtualMachineConfiguration `json:"virtualMachineConfiguration,omitempty"`
+}
+
+// EnvironmentSetting ...
+type EnvironmentSetting struct {
+ Name *string `json:"name,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// FixedScaleSettings ...
+type FixedScaleSettings struct {
+ // ResizeTimeout - The default value is 15 minutes. Timeout values use ISO 8601 format. For example, use PT10M for 10 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ ResizeTimeout *string `json:"resizeTimeout,omitempty"`
+	// TargetDedicatedNodes - At least one of targetDedicatedNodes or targetLowPriorityNodes must be set.
+ TargetDedicatedNodes *int32 `json:"targetDedicatedNodes,omitempty"`
+	// TargetLowPriorityNodes - At least one of targetDedicatedNodes or targetLowPriorityNodes must be set.
+ TargetLowPriorityNodes *int32 `json:"targetLowPriorityNodes,omitempty"`
+ // NodeDeallocationOption - If omitted, the default value is Requeue. Possible values include: 'Requeue', 'Terminate', 'TaskCompletion', 'RetainedData'
+ NodeDeallocationOption ComputeNodeDeallocationOption `json:"nodeDeallocationOption,omitempty"`
+}
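+
+// Illustrative sketch (editor's example, not generated code): a fixed-size pool of four
+// dedicated and two low-priority nodes with the default 15-minute resize timeout spelled
+// out explicitly as an ISO 8601 duration. The node counts are placeholders.
+func exampleFixedScaleSettings() *FixedScaleSettings {
+	timeout := "PT15M"
+	dedicated := int32(4)
+	lowPriority := int32(2)
+	return &FixedScaleSettings{
+		ResizeTimeout:          &timeout,
+		TargetDedicatedNodes:   &dedicated,
+		TargetLowPriorityNodes: &lowPriority,
+		NodeDeallocationOption: Requeue,
+	}
+}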
+
+// ImageReference ...
+type ImageReference struct {
+ // Publisher - For example, Canonical or MicrosoftWindowsServer.
+ Publisher *string `json:"publisher,omitempty"`
+ // Offer - For example, UbuntuServer or WindowsServer.
+ Offer *string `json:"offer,omitempty"`
+ // Sku - For example, 18.04-LTS or 2019-Datacenter.
+ Sku *string `json:"sku,omitempty"`
+ // Version - A value of 'latest' can be specified to select the latest version of an image. If omitted, the default is 'latest'.
+ Version *string `json:"version,omitempty"`
+ // ID - This property is mutually exclusive with other properties. For Virtual Machine Image it must be in the same region and subscription as the Azure Batch account. For SIG image it must have replicas in the same region as the Azure Batch account. For information about the firewall settings for the Batch node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.
+ ID *string `json:"id,omitempty"`
+}
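+
+// Illustrative sketch (editor's example, not generated code): a marketplace image reference
+// using the publisher/offer/sku values mentioned in the field comments above. Version is
+// omitted, so the latest image version is selected.
+func exampleImageReference() *ImageReference {
+	publisher := "Canonical"
+	offer := "UbuntuServer"
+	sku := "18.04-LTS"
+	return &ImageReference{
+		Publisher: &publisher,
+		Offer:     &offer,
+		Sku:       &sku,
+	}
+}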
+
+// InboundNatPool ...
+type InboundNatPool struct {
+ // Name - The name must be unique within a Batch pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400.
+ Name *string `json:"name,omitempty"`
+ // Protocol - Possible values include: 'TCP', 'UDP'
+ Protocol InboundEndpointProtocol `json:"protocol,omitempty"`
+ // BackendPort - This must be unique within a Batch pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400.
+ BackendPort *int32 `json:"backendPort,omitempty"`
+ // FrontendPortRangeStart - Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a pool must be distinct and cannot overlap. If any reserved or overlapping values are provided the request fails with HTTP status code 400.
+ FrontendPortRangeStart *int32 `json:"frontendPortRangeStart,omitempty"`
+ // FrontendPortRangeEnd - Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a pool must be distinct and cannot overlap. If any reserved or overlapping values are provided the request fails with HTTP status code 400.
+ FrontendPortRangeEnd *int32 `json:"frontendPortRangeEnd,omitempty"`
+ // NetworkSecurityGroupRules - The maximum number of rules that can be specified across all the endpoints on a Batch pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400.
+ NetworkSecurityGroupRules *[]NetworkSecurityGroupRule `json:"networkSecurityGroupRules,omitempty"`
+}
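+
+// Illustrative sketch (editor's example, not generated code): an inbound NAT pool that maps
+// a distinct front-end port on the load balancer to port 8080 on each node. The name and
+// port numbers are placeholders chosen to avoid the reserved values described above.
+func exampleInboundNatPool() InboundNatPool {
+	name := "app"
+	backendPort := int32(8080)
+	frontendStart := int32(15000)
+	frontendEnd := int32(15100)
+	return InboundNatPool{
+		Name:                   &name,
+		Protocol:               TCP,
+		BackendPort:            &backendPort,
+		FrontendPortRangeStart: &frontendStart,
+		FrontendPortRangeEnd:   &frontendEnd,
+	}
+}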
+
+// KeyVaultReference identifies the Azure key vault associated with a Batch account.
+type KeyVaultReference struct {
+ // ID - The resource ID of the Azure key vault associated with the Batch account.
+ ID *string `json:"id,omitempty"`
+ // URL - The URL of the Azure key vault associated with the Batch account.
+ URL *string `json:"url,omitempty"`
+}
+
+// LinuxUserConfiguration ...
+type LinuxUserConfiguration struct {
+ // UID - The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the uid.
+ UID *int32 `json:"uid,omitempty"`
+ // Gid - The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the gid.
+ Gid *int32 `json:"gid,omitempty"`
+ // SSHPrivateKey - The private key must not be password protected. The private key is used to automatically configure asymmetric-key based authentication for SSH between nodes in a Linux pool when the pool's enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key pair into the user's .ssh directory. If not specified, password-less SSH is not configured between nodes (no modification of the user's .ssh directory is done).
+ SSHPrivateKey *string `json:"sshPrivateKey,omitempty"`
+}
+
+// ListApplicationPackagesResult the result of performing list application packages.
+type ListApplicationPackagesResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of application packages.
+ Value *[]ApplicationPackage `json:"value,omitempty"`
+ // NextLink - The URL to get the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ListApplicationPackagesResultIterator provides access to a complete listing of ApplicationPackage
+// values.
+type ListApplicationPackagesResultIterator struct {
+ i int
+ page ListApplicationPackagesResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListApplicationPackagesResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListApplicationPackagesResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListApplicationPackagesResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListApplicationPackagesResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListApplicationPackagesResultIterator) Response() ListApplicationPackagesResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListApplicationPackagesResultIterator) Value() ApplicationPackage {
+ if !iter.page.NotDone() {
+ return ApplicationPackage{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ListApplicationPackagesResultIterator type.
+func NewListApplicationPackagesResultIterator(page ListApplicationPackagesResultPage) ListApplicationPackagesResultIterator {
+ return ListApplicationPackagesResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (lapr ListApplicationPackagesResult) IsEmpty() bool {
+ return lapr.Value == nil || len(*lapr.Value) == 0
+}
+
+// listApplicationPackagesResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lapr ListApplicationPackagesResult) listApplicationPackagesResultPreparer(ctx context.Context) (*http.Request, error) {
+ if lapr.NextLink == nil || len(to.String(lapr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lapr.NextLink)))
+}
+
+// ListApplicationPackagesResultPage contains a page of ApplicationPackage values.
+type ListApplicationPackagesResultPage struct {
+ fn func(context.Context, ListApplicationPackagesResult) (ListApplicationPackagesResult, error)
+ lapr ListApplicationPackagesResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListApplicationPackagesResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListApplicationPackagesResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.lapr)
+ if err != nil {
+ return err
+ }
+ page.lapr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListApplicationPackagesResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListApplicationPackagesResultPage) NotDone() bool {
+ return !page.lapr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListApplicationPackagesResultPage) Response() ListApplicationPackagesResult {
+ return page.lapr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListApplicationPackagesResultPage) Values() []ApplicationPackage {
+ if page.lapr.IsEmpty() {
+ return nil
+ }
+ return *page.lapr.Value
+}
+
+// Creates a new instance of the ListApplicationPackagesResultPage type.
+func NewListApplicationPackagesResultPage(getNextPage func(context.Context, ListApplicationPackagesResult) (ListApplicationPackagesResult, error)) ListApplicationPackagesResultPage {
+ return ListApplicationPackagesResultPage{fn: getNextPage}
+}
+
+// ListApplicationsResult the result of performing list applications.
+type ListApplicationsResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of applications.
+ Value *[]Application `json:"value,omitempty"`
+ // NextLink - The URL to get the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ListApplicationsResultIterator provides access to a complete listing of Application values.
+type ListApplicationsResultIterator struct {
+ i int
+ page ListApplicationsResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListApplicationsResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListApplicationsResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListApplicationsResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListApplicationsResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListApplicationsResultIterator) Response() ListApplicationsResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListApplicationsResultIterator) Value() Application {
+ if !iter.page.NotDone() {
+ return Application{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ListApplicationsResultIterator type.
+func NewListApplicationsResultIterator(page ListApplicationsResultPage) ListApplicationsResultIterator {
+ return ListApplicationsResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (lar ListApplicationsResult) IsEmpty() bool {
+ return lar.Value == nil || len(*lar.Value) == 0
+}
+
+// listApplicationsResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lar ListApplicationsResult) listApplicationsResultPreparer(ctx context.Context) (*http.Request, error) {
+ if lar.NextLink == nil || len(to.String(lar.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lar.NextLink)))
+}
+
+// ListApplicationsResultPage contains a page of Application values.
+type ListApplicationsResultPage struct {
+ fn func(context.Context, ListApplicationsResult) (ListApplicationsResult, error)
+ lar ListApplicationsResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListApplicationsResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListApplicationsResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.lar)
+ if err != nil {
+ return err
+ }
+ page.lar = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListApplicationsResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListApplicationsResultPage) NotDone() bool {
+ return !page.lar.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListApplicationsResultPage) Response() ListApplicationsResult {
+ return page.lar
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListApplicationsResultPage) Values() []Application {
+ if page.lar.IsEmpty() {
+ return nil
+ }
+ return *page.lar.Value
+}
+
+// Creates a new instance of the ListApplicationsResultPage type.
+func NewListApplicationsResultPage(getNextPage func(context.Context, ListApplicationsResult) (ListApplicationsResult, error)) ListApplicationsResultPage {
+ return ListApplicationsResultPage{fn: getNextPage}
+}
+
+// ListCertificatesResult values returned by the List operation.
+type ListCertificatesResult struct {
+ autorest.Response `json:"-"`
+ // Value - The collection of returned certificates.
+ Value *[]Certificate `json:"value,omitempty"`
+ // NextLink - The continuation token.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ListCertificatesResultIterator provides access to a complete listing of Certificate values.
+type ListCertificatesResultIterator struct {
+ i int
+ page ListCertificatesResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListCertificatesResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListCertificatesResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListCertificatesResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListCertificatesResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListCertificatesResultIterator) Response() ListCertificatesResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListCertificatesResultIterator) Value() Certificate {
+ if !iter.page.NotDone() {
+ return Certificate{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ListCertificatesResultIterator type.
+func NewListCertificatesResultIterator(page ListCertificatesResultPage) ListCertificatesResultIterator {
+ return ListCertificatesResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (lcr ListCertificatesResult) IsEmpty() bool {
+ return lcr.Value == nil || len(*lcr.Value) == 0
+}
+
+// listCertificatesResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lcr ListCertificatesResult) listCertificatesResultPreparer(ctx context.Context) (*http.Request, error) {
+ if lcr.NextLink == nil || len(to.String(lcr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lcr.NextLink)))
+}
+
+// ListCertificatesResultPage contains a page of Certificate values.
+type ListCertificatesResultPage struct {
+ fn func(context.Context, ListCertificatesResult) (ListCertificatesResult, error)
+ lcr ListCertificatesResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListCertificatesResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListCertificatesResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.lcr)
+ if err != nil {
+ return err
+ }
+ page.lcr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListCertificatesResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListCertificatesResultPage) NotDone() bool {
+ return !page.lcr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListCertificatesResultPage) Response() ListCertificatesResult {
+ return page.lcr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListCertificatesResultPage) Values() []Certificate {
+ if page.lcr.IsEmpty() {
+ return nil
+ }
+ return *page.lcr.Value
+}
+
+// Creates a new instance of the ListCertificatesResultPage type.
+func NewListCertificatesResultPage(getNextPage func(context.Context, ListCertificatesResult) (ListCertificatesResult, error)) ListCertificatesResultPage {
+ return ListCertificatesResultPage{fn: getNextPage}
+}
+
+// ListPoolsResult values returned by the List operation.
+type ListPoolsResult struct {
+ autorest.Response `json:"-"`
+ // Value - The collection of returned pools.
+ Value *[]Pool `json:"value,omitempty"`
+ // NextLink - The continuation token.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ListPoolsResultIterator provides access to a complete listing of Pool values.
+type ListPoolsResultIterator struct {
+ i int
+ page ListPoolsResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListPoolsResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListPoolsResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListPoolsResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListPoolsResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListPoolsResultIterator) Response() ListPoolsResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListPoolsResultIterator) Value() Pool {
+ if !iter.page.NotDone() {
+ return Pool{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ListPoolsResultIterator type.
+func NewListPoolsResultIterator(page ListPoolsResultPage) ListPoolsResultIterator {
+ return ListPoolsResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListPoolsResult contains no values.
+func (lpr ListPoolsResult) IsEmpty() bool {
+ return lpr.Value == nil || len(*lpr.Value) == 0
+}
+
+// listPoolsResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lpr ListPoolsResult) listPoolsResultPreparer(ctx context.Context) (*http.Request, error) {
+ if lpr.NextLink == nil || len(to.String(lpr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lpr.NextLink)))
+}
+
+// ListPoolsResultPage contains a page of Pool values.
+type ListPoolsResultPage struct {
+ fn func(context.Context, ListPoolsResult) (ListPoolsResult, error)
+ lpr ListPoolsResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListPoolsResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListPoolsResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.lpr)
+ if err != nil {
+ return err
+ }
+ page.lpr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListPoolsResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListPoolsResultPage) NotDone() bool {
+ return !page.lpr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListPoolsResultPage) Response() ListPoolsResult {
+ return page.lpr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListPoolsResultPage) Values() []Pool {
+ if page.lpr.IsEmpty() {
+ return nil
+ }
+ return *page.lpr.Value
+}
+
+// Creates a new instance of the ListPoolsResultPage type.
+func NewListPoolsResultPage(getNextPage func(context.Context, ListPoolsResult) (ListPoolsResult, error)) ListPoolsResultPage {
+ return ListPoolsResultPage{fn: getNextPage}
+}
+
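The certificate and pool list types above follow the same generated page/iterator pattern: `NotDone`, `Values`, and `NextWithContext` on the page, with an optional iterator wrapper for element-at-a-time access. As a rough sketch of how a page is typically walked (not part of this diff; the import path is inferred from the directory shown in this change, and in real use the page would come from one of the package's List operations):

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch"
)

// printPoolIDs walks every page of results, printing the ID of each pool.
// NotDone reports whether the current page still holds values, Values returns
// the page's slice, and NextWithContext follows the nextLink to the next page.
func printPoolIDs(ctx context.Context, page batch.ListPoolsResultPage) error {
	for page.NotDone() {
		for _, p := range page.Values() {
			if p.ID != nil {
				fmt.Println(*p.ID)
			}
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// A zero-value page has no values, so this prints nothing; in real use the
	// page comes from a List call on one of the package's clients.
	if err := printPoolIDs(context.Background(), batch.ListPoolsResultPage{}); err != nil {
		fmt.Println(err)
	}
}
```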
+// LocationQuota quotas associated with a Batch region for a particular subscription.
+type LocationQuota struct {
+ autorest.Response `json:"-"`
+ // AccountQuota - READ-ONLY; The number of Batch accounts that may be created under the subscription in the specified region.
+ AccountQuota *int32 `json:"accountQuota,omitempty"`
+}
+
+// MetadataItem the Batch service does not assign any meaning to this metadata; it is solely for the use of
+// user code.
+type MetadataItem struct {
+ Name *string `json:"name,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// MountConfiguration ...
+type MountConfiguration struct {
+ // AzureBlobFileSystemConfiguration - This property is mutually exclusive with all other properties.
+ AzureBlobFileSystemConfiguration *AzureBlobFileSystemConfiguration `json:"azureBlobFileSystemConfiguration,omitempty"`
+ // NfsMountConfiguration - This property is mutually exclusive with all other properties.
+ NfsMountConfiguration *NFSMountConfiguration `json:"nfsMountConfiguration,omitempty"`
+ // CifsMountConfiguration - This property is mutually exclusive with all other properties.
+ CifsMountConfiguration *CIFSMountConfiguration `json:"cifsMountConfiguration,omitempty"`
+ // AzureFileShareConfiguration - This property is mutually exclusive with all other properties.
+ AzureFileShareConfiguration *AzureFileShareConfiguration `json:"azureFileShareConfiguration,omitempty"`
+}
+
+// NetworkConfiguration the network configuration for a pool.
+type NetworkConfiguration struct {
+ // SubnetID - The virtual network must be in the same region and subscription as the Azure Batch account. The specified subnet should have enough free IP addresses to accommodate the number of nodes in the pool. If the subnet doesn't have enough free IP addresses, the pool will partially allocate compute nodes, and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule tasks on the compute nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the compute nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the compute nodes to unusable. For pools created via virtualMachineConfiguration the Batch account must have poolAllocationMode userSubscription in order to use a VNet. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. For pools created with a cloud service configuration, enable ports 10100, 20100, and 30100. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration
+ SubnetID *string `json:"subnetId,omitempty"`
+ // EndpointConfiguration - Pool endpoint configuration is only supported on pools with the virtualMachineConfiguration property.
+ EndpointConfiguration *PoolEndpointConfiguration `json:"endpointConfiguration,omitempty"`
+ // PublicIPs - The number of IPs specified here limits the maximum size of the Pool - 50 dedicated nodes or 20 low-priority nodes can be allocated for each public IP. For example, a pool needing 150 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}.
+ PublicIPs *[]string `json:"publicIPs,omitempty"`
+}
+
+// NetworkSecurityGroupRule ...
+type NetworkSecurityGroupRule struct {
+ // Priority - Priorities within a pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 3500. If any reserved or duplicate values are provided the request fails with HTTP status code 400.
+ Priority *int32 `json:"priority,omitempty"`
+ // Access - Possible values include: 'Allow', 'Deny'
+ Access NetworkSecurityGroupRuleAccess `json:"access,omitempty"`
+ // SourceAddressPrefix - Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400.
+ SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"`
+	// SourcePortRanges - Valid values are '*' (for all ports 0 - 65535) or arrays of ports or port ranges (i.e. 100-200). The ports should be in the range of 0 to 65535 and the port ranges or ports can't overlap. If any other values are provided the request fails with HTTP status code 400. The default value is '*'.
+ SourcePortRanges *[]string `json:"sourcePortRanges,omitempty"`
+}
+
+// NFSMountConfiguration ...
+type NFSMountConfiguration struct {
+ Source *string `json:"source,omitempty"`
+ // RelativeMountPath - All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable.
+ RelativeMountPath *string `json:"relativeMountPath,omitempty"`
+ // MountOptions - These are 'net use' options in Windows and 'mount' options in Linux.
+ MountOptions *string `json:"mountOptions,omitempty"`
+}
+
+// Operation ...
+type Operation struct {
+ // Name - This is of the format {provider}/{resource}/{operation}
+ Name *string `json:"name,omitempty"`
+ Display *OperationDisplay `json:"display,omitempty"`
+ Origin *string `json:"origin,omitempty"`
+ Properties interface{} `json:"properties,omitempty"`
+}
+
+// OperationDisplay ...
+type OperationDisplay struct {
+ Provider *string `json:"provider,omitempty"`
+ // Operation - For example: read, write, delete, or listKeys/action
+ Operation *string `json:"operation,omitempty"`
+ Resource *string `json:"resource,omitempty"`
+ Description *string `json:"description,omitempty"`
+}
+
+// OperationListResult ...
+type OperationListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]Operation `json:"value,omitempty"`
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// OperationListResultIterator provides access to a complete listing of Operation values.
+type OperationListResultIterator struct {
+ i int
+ page OperationListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *OperationListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *OperationListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter OperationListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter OperationListResultIterator) Response() OperationListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter OperationListResultIterator) Value() Operation {
+ if !iter.page.NotDone() {
+ return Operation{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the OperationListResultIterator type.
+func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
+ return OperationListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the OperationListResult contains no values.
+func (olr OperationListResult) IsEmpty() bool {
+ return olr.Value == nil || len(*olr.Value) == 0
+}
+
+// operationListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (olr OperationListResult) operationListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if olr.NextLink == nil || len(to.String(olr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(olr.NextLink)))
+}
+
+// OperationListResultPage contains a page of Operation values.
+type OperationListResultPage struct {
+ fn func(context.Context, OperationListResult) (OperationListResult, error)
+ olr OperationListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *OperationListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.olr)
+ if err != nil {
+ return err
+ }
+ page.olr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *OperationListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page OperationListResultPage) NotDone() bool {
+ return !page.olr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page OperationListResultPage) Response() OperationListResult {
+ return page.olr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page OperationListResultPage) Values() []Operation {
+ if page.olr.IsEmpty() {
+ return nil
+ }
+ return *page.olr.Value
+}
+
+// Creates a new instance of the OperationListResultPage type.
+func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
+ return OperationListResultPage{fn: getNextPage}
+}
+
+// Pool contains information about a pool.
+type Pool struct {
+ autorest.Response `json:"-"`
+ // PoolProperties - The properties associated with the pool.
+ *PoolProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+ // Etag - READ-ONLY; The ETag of the resource, used for concurrency statements.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Pool.
+func (p Pool) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if p.PoolProperties != nil {
+ objectMap["properties"] = p.PoolProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Pool struct.
+func (p *Pool) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var poolProperties PoolProperties
+ err = json.Unmarshal(*v, &poolProperties)
+ if err != nil {
+ return err
+ }
+ p.PoolProperties = &poolProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ p.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ p.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ p.Type = &typeVar
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ p.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
+
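Since ID, Name, Type, and Etag are marked READ-ONLY, the custom MarshalJSON above serializes only the properties envelope. A small illustrative sketch (not part of this diff; the display-name and etag values are arbitrary placeholders, and the import paths are inferred from this change and from go-autorest):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	p := batch.Pool{
		PoolProperties: &batch.PoolProperties{DisplayName: to.StringPtr("demo")},
		// Etag is READ-ONLY; the custom MarshalJSON drops it even if set locally.
		Etag: to.StringPtr("ignored"),
	}
	b, err := json.Marshal(p)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(b)) // {"properties":{"displayName":"demo"}}
}
```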
+// PoolCreateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
+type PoolCreateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *PoolCreateFuture) Result(client PoolClient) (p Pool, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolCreateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("batch.PoolCreateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if p.Response.Response, err = future.GetResult(sender); err == nil && p.Response.Response.StatusCode != http.StatusNoContent {
+ p, err = client.CreateResponder(p.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolCreateFuture", "Result", p.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// PoolDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
+type PoolDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *PoolDeleteFuture) Result(client PoolClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("batch.PoolDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// PoolEndpointConfiguration ...
+type PoolEndpointConfiguration struct {
+ // InboundNatPools - The maximum number of inbound NAT pools per Batch pool is 5. If the maximum number of inbound NAT pools is exceeded the request fails with HTTP status code 400.
+ InboundNatPools *[]InboundNatPool `json:"inboundNatPools,omitempty"`
+}
+
+// PoolProperties pool properties.
+type PoolProperties struct {
+ // DisplayName - The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ DisplayName *string `json:"displayName,omitempty"`
+ // LastModified - READ-ONLY; This is the last time at which the pool level data, such as the targetDedicatedNodes or autoScaleSettings, changed. It does not factor in node-level changes such as a compute node changing state.
+ LastModified *date.Time `json:"lastModified,omitempty"`
+ // CreationTime - READ-ONLY
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+ // ProvisioningState - READ-ONLY; Possible values include: 'PoolProvisioningStateSucceeded', 'PoolProvisioningStateDeleting'
+ ProvisioningState PoolProvisioningState `json:"provisioningState,omitempty"`
+ // ProvisioningStateTransitionTime - READ-ONLY
+ ProvisioningStateTransitionTime *date.Time `json:"provisioningStateTransitionTime,omitempty"`
+ // AllocationState - READ-ONLY; Possible values include: 'Steady', 'Resizing', 'Stopping'
+ AllocationState AllocationState `json:"allocationState,omitempty"`
+ // AllocationStateTransitionTime - READ-ONLY
+ AllocationStateTransitionTime *date.Time `json:"allocationStateTransitionTime,omitempty"`
+ // VMSize - For information about available sizes of virtual machines for Cloud Services pools (pools created with cloudServiceConfiguration), see Sizes for Cloud Services (http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). Batch supports all Cloud Services VM sizes except ExtraSmall. For information about available VM sizes for pools using images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
+ VMSize *string `json:"vmSize,omitempty"`
+	// DeploymentConfiguration - Using CloudServiceConfiguration specifies that the nodes should be created using Azure Cloud Services (PaaS), while VirtualMachineConfiguration uses Azure Virtual Machines (IaaS).
+ DeploymentConfiguration *DeploymentConfiguration `json:"deploymentConfiguration,omitempty"`
+ // CurrentDedicatedNodes - READ-ONLY
+ CurrentDedicatedNodes *int32 `json:"currentDedicatedNodes,omitempty"`
+ // CurrentLowPriorityNodes - READ-ONLY
+ CurrentLowPriorityNodes *int32 `json:"currentLowPriorityNodes,omitempty"`
+ ScaleSettings *ScaleSettings `json:"scaleSettings,omitempty"`
+ // AutoScaleRun - READ-ONLY; This property is set only if the pool automatically scales, i.e. autoScaleSettings are used.
+ AutoScaleRun *AutoScaleRun `json:"autoScaleRun,omitempty"`
+	// InterNodeCommunication - This imposes restrictions on which nodes can be assigned to the pool. Enabling this value can reduce the chance of the requested number of nodes being allocated in the pool. If not specified, this value defaults to 'Disabled'. Possible values include: 'Enabled', 'Disabled'
+ InterNodeCommunication InterNodeCommunicationState `json:"interNodeCommunication,omitempty"`
+ NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"`
+ // MaxTasksPerNode - The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.
+ MaxTasksPerNode *int32 `json:"maxTasksPerNode,omitempty"`
+ // TaskSchedulingPolicy - If not specified, the default is spread.
+ TaskSchedulingPolicy *TaskSchedulingPolicy `json:"taskSchedulingPolicy,omitempty"`
+ UserAccounts *[]UserAccount `json:"userAccounts,omitempty"`
+ // Metadata - The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ Metadata *[]MetadataItem `json:"metadata,omitempty"`
+	// StartTask - In a PATCH (update) operation, this property can be set to an empty object to remove the start task from the pool.
+ StartTask *StartTask `json:"startTask,omitempty"`
+ // Certificates - For Windows compute nodes, the Batch service installs the certificates to the specified certificate store and location. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory.
+ Certificates *[]CertificateReference `json:"certificates,omitempty"`
+ // ApplicationPackages - Changes to application package references affect all new compute nodes joining the pool, but do not affect compute nodes that are already in the pool until they are rebooted or reimaged. There is a maximum of 10 application package references on any given pool.
+ ApplicationPackages *[]ApplicationPackageReference `json:"applicationPackages,omitempty"`
+ // ApplicationLicenses - The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, pool creation will fail.
+ ApplicationLicenses *[]string `json:"applicationLicenses,omitempty"`
+ // ResizeOperationStatus - READ-ONLY
+ ResizeOperationStatus *ResizeOperationStatus `json:"resizeOperationStatus,omitempty"`
+ // MountConfiguration - This supports Azure Files, NFS, CIFS/SMB, and Blobfuse.
+ MountConfiguration *[]MountConfiguration `json:"mountConfiguration,omitempty"`
+}
+
+// ProxyResource a definition of an Azure resource.
+type ProxyResource struct {
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+ // Etag - READ-ONLY; The ETag of the resource, used for concurrency statements.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// ResizeError ...
+type ResizeError struct {
+ // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+ Code *string `json:"code,omitempty"`
+ // Message - A message describing the error, intended to be suitable for display in a user interface.
+ Message *string `json:"message,omitempty"`
+ Details *[]ResizeError `json:"details,omitempty"`
+}
+
+// ResizeOperationStatus describes either the current operation (if the pool AllocationState is Resizing)
+// or the previously completed operation (if the AllocationState is Steady).
+type ResizeOperationStatus struct {
+ TargetDedicatedNodes *int32 `json:"targetDedicatedNodes,omitempty"`
+ TargetLowPriorityNodes *int32 `json:"targetLowPriorityNodes,omitempty"`
+ // ResizeTimeout - The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request).
+ ResizeTimeout *string `json:"resizeTimeout,omitempty"`
+ // NodeDeallocationOption - The default value is requeue. Possible values include: 'Requeue', 'Terminate', 'TaskCompletion', 'RetainedData'
+ NodeDeallocationOption ComputeNodeDeallocationOption `json:"nodeDeallocationOption,omitempty"`
+ StartTime *date.Time `json:"startTime,omitempty"`
+ // Errors - This property is set only if an error occurred during the last pool resize, and only when the pool allocationState is Steady.
+ Errors *[]ResizeError `json:"errors,omitempty"`
+}
+
+// Resource a definition of an Azure resource.
+type Resource struct {
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+ // Location - READ-ONLY; The location of the resource.
+ Location *string `json:"location,omitempty"`
+ // Tags - READ-ONLY; The tags of the resource.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for Resource.
+func (r Resource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// ResourceFile ...
+type ResourceFile struct {
+ // AutoStorageContainerName - The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified.
+ AutoStorageContainerName *string `json:"autoStorageContainerName,omitempty"`
+ // StorageContainerURL - The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable using anonymous access; that is, the Batch service does not present any credentials when downloading the blob. There are two ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the blob, or set the ACL for the blob or its container to allow public access.
+ StorageContainerURL *string `json:"storageContainerUrl,omitempty"`
+ // HTTPURL - The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL is Azure Blob Storage, it must be readable using anonymous access; that is, the Batch service does not present any credentials when downloading the blob. There are two ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, or set the ACL for the blob or its container to allow public access.
+ HTTPURL *string `json:"httpUrl,omitempty"`
+ // BlobPrefix - The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded.
+ BlobPrefix *string `json:"blobPrefix,omitempty"`
+ // FilePath - If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the task's working directory (for example by using '..').
+ FilePath *string `json:"filePath,omitempty"`
+ // FileMode - This property applies only to files being downloaded to Linux compute nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows node. If this property is not specified for a Linux node, then a default value of 0770 is applied to the file.
+ FileMode *string `json:"fileMode,omitempty"`
+}
+
+// ScaleSettings defines the desired size of the pool. This can either be 'fixedScale' where the requested
+// targetDedicatedNodes is specified, or 'autoScale' which defines a formula which is periodically
+// reevaluated. If this property is not specified, the pool will have a fixed scale with 0
+// targetDedicatedNodes.
+type ScaleSettings struct {
+ // FixedScale - This property and autoScale are mutually exclusive and one of the properties must be specified.
+ FixedScale *FixedScaleSettings `json:"fixedScale,omitempty"`
+ // AutoScale - This property and fixedScale are mutually exclusive and one of the properties must be specified.
+ AutoScale *AutoScaleSettings `json:"autoScale,omitempty"`
+}
+
+// StartTask in some cases the start task may be re-run even though the node was not rebooted. Due to this,
+// start tasks should be idempotent and exit gracefully if the setup they're performing has already been
+// done. Special care should be taken to avoid start tasks which create breakaway processes or install/launch
+// services from the start task working directory, as this will block Batch from being able to re-run the
+// start task.
+type StartTask struct {
+ // CommandLine - The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. Required if any other properties of the startTask are specified.
+ CommandLine *string `json:"commandLine,omitempty"`
+ ResourceFiles *[]ResourceFile `json:"resourceFiles,omitempty"`
+ EnvironmentSettings *[]EnvironmentSetting `json:"environmentSettings,omitempty"`
+ // UserIdentity - If omitted, the task runs as a non-administrative user unique to the task.
+ UserIdentity *UserIdentity `json:"userIdentity,omitempty"`
+ // MaxTaskRetryCount - The Batch service retries a task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try the task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the task. If the maximum retry count is -1, the Batch service retries the task without limit.
+ MaxTaskRetryCount *int32 `json:"maxTaskRetryCount,omitempty"`
+ // WaitForSuccess - If true and the start task fails on a compute node, the Batch service retries the start task up to its maximum retry count (maxTaskRetryCount). If the task has still not completed successfully after all retries, then the Batch service marks the compute node unusable, and will not schedule tasks to it. This condition can be detected via the node state and scheduling error detail. If false, the Batch service will not wait for the start task to complete. In this case, other tasks can start executing on the compute node while the start task is still running; and even if the start task fails, new tasks will continue to be scheduled on the node. The default is true.
+ WaitForSuccess *bool `json:"waitForSuccess,omitempty"`
+ // ContainerSettings - When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container.
+ ContainerSettings *TaskContainerSettings `json:"containerSettings,omitempty"`
+}
+
+// TaskContainerSettings ...
+type TaskContainerSettings struct {
+ // ContainerRunOptions - These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service.
+ ContainerRunOptions *string `json:"containerRunOptions,omitempty"`
+ // ImageName - This is the full image reference, as would be specified to "docker pull". If no tag is provided as part of the image name, the tag ":latest" is used as a default.
+ ImageName *string `json:"imageName,omitempty"`
+	// Registry - This setting can be omitted if it was already provided at pool creation.
+ Registry *ContainerRegistry `json:"registry,omitempty"`
+ // WorkingDirectory - Possible values include: 'TaskWorkingDirectory', 'ContainerImageDefault'
+ WorkingDirectory ContainerWorkingDirectory `json:"workingDirectory,omitempty"`
+}
+
+// TaskSchedulingPolicy ...
+type TaskSchedulingPolicy struct {
+ // NodeFillType - Possible values include: 'Spread', 'Pack'
+ NodeFillType ComputeNodeFillType `json:"nodeFillType,omitempty"`
+}
+
+// UserAccount ...
+type UserAccount struct {
+ Name *string `json:"name,omitempty"`
+ Password *string `json:"password,omitempty"`
+ // ElevationLevel - nonAdmin - The auto user is a standard user without elevated access. admin - The auto user is a user with elevated access and operates with full Administrator permissions. The default value is nonAdmin. Possible values include: 'NonAdmin', 'Admin'
+ ElevationLevel ElevationLevel `json:"elevationLevel,omitempty"`
+ // LinuxUserConfiguration - This property is ignored if specified on a Windows pool. If not specified, the user is created with the default options.
+ LinuxUserConfiguration *LinuxUserConfiguration `json:"linuxUserConfiguration,omitempty"`
+ // WindowsUserConfiguration - This property can only be specified if the user is on a Windows pool. If not specified and on a Windows pool, the user is created with the default options.
+ WindowsUserConfiguration *WindowsUserConfiguration `json:"windowsUserConfiguration,omitempty"`
+}
+
+// UserIdentity specify either the userName or autoUser property, but not both.
+type UserIdentity struct {
+ // UserName - The userName and autoUser properties are mutually exclusive; you must specify one but not both.
+ UserName *string `json:"userName,omitempty"`
+ // AutoUser - The userName and autoUser properties are mutually exclusive; you must specify one but not both.
+ AutoUser *AutoUserSpecification `json:"autoUser,omitempty"`
+}
+
+// VirtualMachineConfiguration ...
+type VirtualMachineConfiguration struct {
+ ImageReference *ImageReference `json:"imageReference,omitempty"`
+ // NodeAgentSkuID - The Batch node agent is a program that runs on each node in the pool, and provides the command-and-control interface between the node and the Batch service. There are different implementations of the node agent, known as SKUs, for different operating systems. You must specify a node agent SKU which matches the selected image reference. To get the list of supported node agent SKUs along with their list of verified image references, see the 'List supported node agent SKUs' operation.
+ NodeAgentSkuID *string `json:"nodeAgentSkuId,omitempty"`
+ // WindowsConfiguration - This property must not be specified if the imageReference specifies a Linux OS image.
+ WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"`
+ // DataDisks - This property must be specified if the compute nodes in the pool need to have empty data disks attached to them.
+ DataDisks *[]DataDisk `json:"dataDisks,omitempty"`
+ // LicenseType - This only applies to images that contain the Windows operating system, and should only be used when you hold valid on-premises licenses for the nodes which will be deployed. If omitted, no on-premises licensing discount is applied. Values are:
+ // Windows_Server - The on-premises license is for Windows Server.
+ // Windows_Client - The on-premises license is for Windows Client.
+ LicenseType *string `json:"licenseType,omitempty"`
+	// ContainerConfiguration - If specified, setup is performed on each node in the pool to allow tasks to run in containers. All regular tasks and job manager tasks that run on this pool must specify the containerSettings property, and all other tasks may specify it.
+ ContainerConfiguration *ContainerConfiguration `json:"containerConfiguration,omitempty"`
+}
+
+// VirtualMachineFamilyCoreQuota a VM Family and its associated core quota for the Batch account.
+type VirtualMachineFamilyCoreQuota struct {
+ // Name - READ-ONLY; The Virtual Machine family name.
+ Name *string `json:"name,omitempty"`
+ // CoreQuota - READ-ONLY; The core quota for the VM family for the Batch account.
+ CoreQuota *int32 `json:"coreQuota,omitempty"`
+}
+
+// WindowsConfiguration ...
+type WindowsConfiguration struct {
+ // EnableAutomaticUpdates - If omitted, the default value is true.
+ EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"`
+}
+
+// WindowsUserConfiguration ...
+type WindowsUserConfiguration struct {
+ // LoginMode - Specifies login mode for the user. The default value for VirtualMachineConfiguration pools is interactive mode and for CloudServiceConfiguration pools is batch mode. Possible values include: 'Batch', 'Interactive'
+ LoginMode LoginMode `json:"loginMode,omitempty"`
+}
diff --git a/services/batch/mgmt/2019-08-01/batch/operations.go b/services/batch/mgmt/2019-08-01/batch/operations.go
new file mode 100644
index 000000000000..ba276f32e2d2
--- /dev/null
+++ b/services/batch/mgmt/2019-08-01/batch/operations.go
@@ -0,0 +1,147 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// OperationsClient is the client for the Operations methods of the Batch service.
+type OperationsClient struct {
+ BaseClient
+}
+
+// NewOperationsClient creates an instance of the OperationsClient client.
+func NewOperationsClient(subscriptionID string) OperationsClient {
+ return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+ return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists available operations for the Microsoft.Batch provider
+func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.olr.Response.Response != nil {
+ sc = result.olr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.OperationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.olr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.OperationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.olr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.OperationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Batch/operations"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationListResult) (result OperationListResult, err error) {
+ req, err := lastResults.operationListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.OperationsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.OperationsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.OperationsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
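List, ListComplete, and the OperationListResultIterator defined in the models file give two ways to enumerate the Microsoft.Batch provider's operations. A minimal sketch of the iterator-based path follows (not part of this diff; the subscription ID is a placeholder, credential setup is omitted, and the import path is inferred from the directory shown above):

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch"
)

func main() {
	// Placeholder subscription ID; the operations listing itself is not
	// subscription-scoped, but the client constructor requires one.
	client := batch.NewOperationsClient("00000000-0000-0000-0000-000000000000")
	// client.Authorizer = ...   // credential setup omitted in this sketch

	ctx := context.Background()
	// ListComplete returns an iterator that transparently follows nextLink.
	iter, err := client.ListComplete(ctx)
	if err != nil {
		fmt.Println(err)
		return
	}
	for iter.NotDone() {
		op := iter.Value()
		if op.Name != nil {
			fmt.Println(*op.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			fmt.Println(err)
			return
		}
	}
}
```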
diff --git a/services/batch/mgmt/2019-08-01/batch/pool.go b/services/batch/mgmt/2019-08-01/batch/pool.go
new file mode 100644
index 000000000000..9e960d28c858
--- /dev/null
+++ b/services/batch/mgmt/2019-08-01/batch/pool.go
@@ -0,0 +1,801 @@
+package batch
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// PoolClient is the client for the Pool methods of the Batch service.
+type PoolClient struct {
+ BaseClient
+}
+
+// NewPoolClient creates an instance of the PoolClient client.
+func NewPoolClient(subscriptionID string) PoolClient {
+ return NewPoolClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPoolClientWithBaseURI creates an instance of the PoolClient client.
+func NewPoolClientWithBaseURI(baseURI string, subscriptionID string) PoolClient {
+ return PoolClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create creates a new pool inside the specified account.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// poolName - the pool name. This must be unique within the account.
+// parameters - additional parameters for pool creation.
+// ifMatch - the entity state (ETag) version of the pool to update. A value of "*" can be used to apply the
+// operation only if the pool already exists. If omitted, this operation will always be applied.
+// ifNoneMatch - set to '*' to allow a new pool to be created, but to prevent updating an existing pool. Other
+// values will be ignored.
+func (client PoolClient) Create(ctx context.Context, resourceGroupName string, accountName string, poolName string, parameters Pool, ifMatch string, ifNoneMatch string) (result PoolCreateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: poolName,
+ Constraints: []validation.Constraint{{Target: "poolName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "poolName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "poolName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.PoolProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.DeploymentConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.DeploymentConfiguration.CloudServiceConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.DeploymentConfiguration.CloudServiceConfiguration.OsFamily", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.PoolProperties.DeploymentConfiguration.VirtualMachineConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.DeploymentConfiguration.VirtualMachineConfiguration.ImageReference", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.PoolProperties.DeploymentConfiguration.VirtualMachineConfiguration.NodeAgentSkuID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.PoolProperties.DeploymentConfiguration.VirtualMachineConfiguration.ContainerConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.DeploymentConfiguration.VirtualMachineConfiguration.ContainerConfiguration.Type", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ }},
+ {Target: "parameters.PoolProperties.ScaleSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.ScaleSettings.AutoScale", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.ScaleSettings.AutoScale.Formula", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "parameters.PoolProperties.AutoScaleRun", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.AutoScaleRun.EvaluationTime", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.PoolProperties.AutoScaleRun.Error", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.AutoScaleRun.Error.Code", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.PoolProperties.AutoScaleRun.Error.Message", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ {Target: "parameters.PoolProperties.NetworkConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.NetworkConfiguration.EndpointConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.NetworkConfiguration.EndpointConfiguration.InboundNatPools", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "parameters.PoolProperties.StartTask", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.StartTask.ContainerSettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.StartTask.ContainerSettings.ImageName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.PoolProperties.StartTask.ContainerSettings.Registry", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.PoolProperties.StartTask.ContainerSettings.Registry.UserName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.PoolProperties.StartTask.ContainerSettings.Registry.Password", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, poolName, parameters, ifMatch, ifNoneMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Create", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client PoolClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, poolName string, parameters Pool, ifMatch string, ifNoneMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "poolName": autorest.Encode("path", poolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) CreateSender(req *http.Request) (future PoolCreateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client PoolClient) CreateResponder(resp *http.Response) (result Pool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
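
Create, CreateSender, and CreateResponder implement the usual generated long-running-operation pattern: the initial PUT returns a `PoolCreateFuture` that the caller polls to completion. A hedged sketch of how a consumer might drive that flow end to end; the environment variable, resource names, and the minimal pool body are illustrative assumptions, not part of this change.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/services/batch/mgmt/2019-08-01/batch"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Assumed environment: AZURE_SUBSCRIPTION_ID plus whatever credentials
	// auth.NewAuthorizerFromEnvironment knows how to pick up.
	subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := batch.NewPoolClient(subscriptionID)
	client.Authorizer = authorizer

	// Deliberately minimal pool body; a real pool also needs a deployment
	// configuration and scale settings.
	pool := batch.Pool{
		PoolProperties: &batch.PoolProperties{
			VMSize: to.StringPtr("STANDARD_D2_V2"),
		},
	}

	ctx := context.Background()
	// Empty ifMatch/ifNoneMatch arguments skip the conditional headers.
	future, err := client.Create(ctx, "example-rg", "examplebatchacct", "example-pool", pool, "", "")
	if err != nil {
		log.Fatal(err)
	}
	// Create returns a PoolCreateFuture; block until the service reports a
	// terminal state, then pull the final Pool out of the future.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	created, err := future.Result(client)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created pool:", *created.Name)
}
```

Polling cadence and timeout come from the client's embedded `autorest.Client` settings, which is why `WaitForCompletionRef` takes `client.Client`.
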
+
+// Delete deletes the specified pool.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// poolName - the pool name. This must be unique within the account.
+func (client PoolClient) Delete(ctx context.Context, resourceGroupName string, accountName string, poolName string) (result PoolDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: poolName,
+ Constraints: []validation.Constraint{{Target: "poolName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "poolName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "poolName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, poolName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PoolClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, poolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "poolName": autorest.Encode("path", poolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) DeleteSender(req *http.Request) (future PoolDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PoolClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// DisableAutoScale disables automatic scaling for a pool.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// poolName - the pool name. This must be unique within the account.
+func (client PoolClient) DisableAutoScale(ctx context.Context, resourceGroupName string, accountName string, poolName string) (result Pool, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.DisableAutoScale")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: poolName,
+ Constraints: []validation.Constraint{{Target: "poolName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "poolName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "poolName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "DisableAutoScale", err.Error())
+ }
+
+ req, err := client.DisableAutoScalePreparer(ctx, resourceGroupName, accountName, poolName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "DisableAutoScale", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DisableAutoScaleSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "DisableAutoScale", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DisableAutoScaleResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "DisableAutoScale", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DisableAutoScalePreparer prepares the DisableAutoScale request.
+func (client PoolClient) DisableAutoScalePreparer(ctx context.Context, resourceGroupName string, accountName string, poolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "poolName": autorest.Encode("path", poolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}/disableAutoScale", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DisableAutoScaleSender sends the DisableAutoScale request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) DisableAutoScaleSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DisableAutoScaleResponder handles the response to the DisableAutoScale request. The method always
+// closes the http.Response Body.
+func (client PoolClient) DisableAutoScaleResponder(resp *http.Response) (result Pool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Get gets information about the specified pool.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// poolName - the pool name. This must be unique within the account.
+func (client PoolClient) Get(ctx context.Context, resourceGroupName string, accountName string, poolName string) (result Pool, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: poolName,
+ Constraints: []validation.Constraint{{Target: "poolName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "poolName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "poolName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, poolName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client PoolClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, poolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "poolName": autorest.Encode("path", poolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PoolClient) GetResponder(resp *http.Response) (result Pool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByBatchAccount lists all of the pools in the specified account.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// maxresults - the maximum number of items to return in the response.
+// selectParameter - comma-separated list of properties that should be returned, e.g.
+// "properties/provisioningState". Only top-level properties under properties/ are valid for selection.
+// filter - OData filter expression. Valid properties for filtering are:
+//
+// name
+// properties/allocationState
+// properties/allocationStateTransitionTime
+// properties/creationTime
+// properties/provisioningState
+// properties/provisioningStateTransitionTime
+// properties/lastModified
+// properties/vmSize
+// properties/interNodeCommunication
+// properties/scaleSettings/autoScale
+// properties/scaleSettings/fixedScale
+func (client PoolClient) ListByBatchAccount(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32, selectParameter string, filter string) (result ListPoolsResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.ListByBatchAccount")
+ defer func() {
+ sc := -1
+ if result.lpr.Response.Response != nil {
+ sc = result.lpr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "ListByBatchAccount", err.Error())
+ }
+
+ result.fn = client.listByBatchAccountNextResults
+ req, err := client.ListByBatchAccountPreparer(ctx, resourceGroupName, accountName, maxresults, selectParameter, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "ListByBatchAccount", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByBatchAccountSender(req)
+ if err != nil {
+ result.lpr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "ListByBatchAccount", resp, "Failure sending request")
+ return
+ }
+
+ result.lpr, err = client.ListByBatchAccountResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "ListByBatchAccount", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByBatchAccountPreparer prepares the ListByBatchAccount request.
+func (client PoolClient) ListByBatchAccountPreparer(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32, selectParameter string, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if maxresults != nil {
+ queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
+ }
+ if len(selectParameter) > 0 {
+ queryParameters["$select"] = autorest.Encode("query", selectParameter)
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByBatchAccountSender sends the ListByBatchAccount request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) ListByBatchAccountSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByBatchAccountResponder handles the response to the ListByBatchAccount request. The method always
+// closes the http.Response Body.
+func (client PoolClient) ListByBatchAccountResponder(resp *http.Response) (result ListPoolsResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByBatchAccountNextResults retrieves the next set of results, if any.
+func (client PoolClient) listByBatchAccountNextResults(ctx context.Context, lastResults ListPoolsResult) (result ListPoolsResult, err error) {
+ req, err := lastResults.listPoolsResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "batch.PoolClient", "listByBatchAccountNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBatchAccountSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "batch.PoolClient", "listByBatchAccountNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBatchAccountResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "listByBatchAccountNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBatchAccountComplete enumerates all values, automatically crossing page boundaries as required.
+func (client PoolClient) ListByBatchAccountComplete(ctx context.Context, resourceGroupName string, accountName string, maxresults *int32, selectParameter string, filter string) (result ListPoolsResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.ListByBatchAccount")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBatchAccount(ctx, resourceGroupName, accountName, maxresults, selectParameter, filter)
+ return
+}
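
ListByBatchAccount returns a single page of results, while ListByBatchAccountComplete wraps it in an iterator that invokes listByBatchAccountNextResults behind the scenes. A small fragment showing the iterator-based enumeration, meant to sit alongside the earlier example (sharing its imports); the resource names are placeholders.

```go
// listAllPools prints the name of every pool in the account, letting the
// iterator fetch further pages as needed. Resource names are placeholders,
// and the client is assumed to already carry a valid Authorizer.
func listAllPools(ctx context.Context, client batch.PoolClient) error {
	iter, err := client.ListByBatchAccountComplete(ctx, "example-rg", "examplebatchacct", nil, "", "")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		if name := iter.Value().Name; name != nil {
			fmt.Println(*name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```
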
+
+// StopResize this does not restore the pool to its previous state before the resize operation: it only stops any
+// further changes being made, and the pool maintains its current state. After stopping, the pool stabilizes at the
+// number of nodes it was at when the stop operation was done. During the stop operation, the pool allocation state
+// changes first to stopping and then to steady. A resize operation need not be an explicit resize pool request; this
+// API can also be used to halt the initial sizing of the pool when it is created.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// poolName - the pool name. This must be unique within the account.
+func (client PoolClient) StopResize(ctx context.Context, resourceGroupName string, accountName string, poolName string) (result Pool, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.StopResize")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: poolName,
+ Constraints: []validation.Constraint{{Target: "poolName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "poolName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "poolName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "StopResize", err.Error())
+ }
+
+ req, err := client.StopResizePreparer(ctx, resourceGroupName, accountName, poolName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "StopResize", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.StopResizeSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "StopResize", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.StopResizeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "StopResize", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// StopResizePreparer prepares the StopResize request.
+func (client PoolClient) StopResizePreparer(ctx context.Context, resourceGroupName string, accountName string, poolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "poolName": autorest.Encode("path", poolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}/stopResize", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// StopResizeSender sends the StopResize request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) StopResizeSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// StopResizeResponder handles the response to the StopResize request. The method always
+// closes the http.Response Body.
+func (client PoolClient) StopResizeResponder(resp *http.Response) (result Pool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Update updates the properties of an existing pool.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the Batch account.
+// accountName - the name of the Batch account.
+// poolName - the pool name. This must be unique within the account.
+// parameters - pool properties that should be updated. Properties that are supplied will be updated; any
+// property not supplied will remain unchanged.
+// ifMatch - the entity state (ETag) version of the pool to update. This value can be omitted or set to "*" to
+// apply the operation unconditionally.
+func (client PoolClient) Update(ctx context.Context, resourceGroupName string, accountName string, poolName string, parameters Pool, ifMatch string) (result Pool, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoolClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "accountName", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}},
+ {TargetValue: poolName,
+ Constraints: []validation.Constraint{{Target: "poolName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "poolName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "poolName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_-]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("batch.PoolClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, poolName, parameters, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "batch.PoolClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client PoolClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, poolName string, parameters Pool, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "poolName": autorest.Encode("path", poolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoolClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client PoolClient) UpdateResponder(resp *http.Response) (result Pool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
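
Update sends a PATCH and, when ifMatch is non-empty, decorates the request with an If-Match header, giving callers optimistic concurrency against the pool's ETag. A hedged sketch of a read-modify-write cycle built on Get plus Update; the helper name, resource names, and metadata payload are illustrative, and the fragment assumes the same imports as the earlier example.

```go
// tagPool adds an "owner" metadata entry to a pool, threading the ETag from
// the preceding GET through the PATCH. Names and values are placeholders.
func tagPool(ctx context.Context, client batch.PoolClient) error {
	current, err := client.Get(ctx, "example-rg", "examplebatchacct", "example-pool")
	if err != nil {
		return err
	}
	etag := ""
	if current.Etag != nil {
		etag = *current.Etag
	}
	update := batch.Pool{
		PoolProperties: &batch.PoolProperties{
			Metadata: &[]batch.MetadataItem{
				{Name: to.StringPtr("owner"), Value: to.StringPtr("team-a")},
			},
		},
	}
	// With a non-empty ifMatch the PATCH carries If-Match, so a concurrent
	// change to the pool makes the service reject this update instead of
	// silently overwriting it.
	_, err = client.Update(ctx, "example-rg", "examplebatchacct", "example-pool", update, etag)
	return err
}
```
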
diff --git a/services/batch/mgmt/2019-08-01/batch/version.go b/services/batch/mgmt/2019-08-01/batch/version.go
new file mode 100644
index 000000000000..38e5c092b142
--- /dev/null
+++ b/services/batch/mgmt/2019-08-01/batch/version.go
@@ -0,0 +1,30 @@
+package batch
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " batch/2019-08-01"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
diff --git a/services/cognitiveservices/v1.0/face/largefacelist.go b/services/cognitiveservices/v1.0/face/largefacelist.go
index c55edd5bd3c5..115594d8f745 100644
--- a/services/cognitiveservices/v1.0/face/largefacelist.go
+++ b/services/cognitiveservices/v1.0/face/largefacelist.go
@@ -101,7 +101,8 @@ func (client LargeFaceListClient) AddFaceFromStream(ctx context.Context, largeFa
Constraints: []validation.Constraint{{Target: "largeFaceListID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "largeFaceListID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil}}},
{TargetValue: userData,
- Constraints: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "userData", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("face.LargeFaceListClient", "AddFaceFromStream", err.Error())
}
@@ -242,7 +243,8 @@ func (client LargeFaceListClient) AddFaceFromURL(ctx context.Context, largeFaceL
Constraints: []validation.Constraint{{Target: "largeFaceListID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "largeFaceListID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil}}},
{TargetValue: userData,
- Constraints: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}},
+ Constraints: []validation.Constraint{{Target: "userData", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}},
{TargetValue: imageURL,
Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("face.LargeFaceListClient", "AddFaceFromURL", err.Error())
diff --git a/services/cognitiveservices/v1.0/face/largepersongroup.go b/services/cognitiveservices/v1.0/face/largepersongroup.go
index d90105be18dd..46e06f289f1b 100644
--- a/services/cognitiveservices/v1.0/face/largepersongroup.go
+++ b/services/cognitiveservices/v1.0/face/largepersongroup.go
@@ -423,7 +423,8 @@ func (client LargePersonGroupClient) List(ctx context.Context, start string, top
}
if err := validation.Validate([]validation.Validation{
{TargetValue: start,
- Constraints: []validation.Constraint{{Target: "start", Name: validation.MaxLength, Rule: 64, Chain: nil}}},
+ Constraints: []validation.Constraint{{Target: "start", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "start", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}},
{TargetValue: top,
Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
diff --git a/services/cognitiveservices/v1.0/face/largepersongroupperson.go b/services/cognitiveservices/v1.0/face/largepersongroupperson.go
index 0ce7061197c4..9b91259e4402 100644
--- a/services/cognitiveservices/v1.0/face/largepersongroupperson.go
+++ b/services/cognitiveservices/v1.0/face/largepersongroupperson.go
@@ -100,7 +100,8 @@ func (client LargePersonGroupPersonClient) AddFaceFromStream(ctx context.Context
Constraints: []validation.Constraint{{Target: "largePersonGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "largePersonGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil}}},
{TargetValue: userData,
- Constraints: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "userData", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("face.LargePersonGroupPersonClient", "AddFaceFromStream", err.Error())
}
@@ -241,7 +242,8 @@ func (client LargePersonGroupPersonClient) AddFaceFromURL(ctx context.Context, l
Constraints: []validation.Constraint{{Target: "largePersonGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "largePersonGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil}}},
{TargetValue: userData,
- Constraints: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}},
+ Constraints: []validation.Constraint{{Target: "userData", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}},
{TargetValue: imageURL,
Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("face.LargePersonGroupPersonClient", "AddFaceFromURL", err.Error())
diff --git a/services/cognitiveservices/v1.0/face/list.go b/services/cognitiveservices/v1.0/face/list.go
index 9b844fe990e0..3d8db7995409 100644
--- a/services/cognitiveservices/v1.0/face/list.go
+++ b/services/cognitiveservices/v1.0/face/list.go
@@ -97,7 +97,8 @@ func (client ListClient) AddFaceFromStream(ctx context.Context, faceListID strin
Constraints: []validation.Constraint{{Target: "faceListID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "faceListID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil}}},
{TargetValue: userData,
- Constraints: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "userData", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("face.ListClient", "AddFaceFromStream", err.Error())
}
@@ -234,7 +235,8 @@ func (client ListClient) AddFaceFromURL(ctx context.Context, faceListID string,
Constraints: []validation.Constraint{{Target: "faceListID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "faceListID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil}}},
{TargetValue: userData,
- Constraints: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}},
+ Constraints: []validation.Constraint{{Target: "userData", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}},
{TargetValue: imageURL,
Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("face.ListClient", "AddFaceFromURL", err.Error())
diff --git a/services/cognitiveservices/v1.0/face/persongroup.go b/services/cognitiveservices/v1.0/face/persongroup.go
index 123455686495..4fcae71db274 100644
--- a/services/cognitiveservices/v1.0/face/persongroup.go
+++ b/services/cognitiveservices/v1.0/face/persongroup.go
@@ -419,7 +419,8 @@ func (client PersonGroupClient) List(ctx context.Context, start string, top *int
}
if err := validation.Validate([]validation.Validation{
{TargetValue: start,
- Constraints: []validation.Constraint{{Target: "start", Name: validation.MaxLength, Rule: 64, Chain: nil}}},
+ Constraints: []validation.Constraint{{Target: "start", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "start", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}},
{TargetValue: top,
Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
diff --git a/services/cognitiveservices/v1.0/face/persongroupperson.go b/services/cognitiveservices/v1.0/face/persongroupperson.go
index ca1a8baa1867..dd908336cb90 100644
--- a/services/cognitiveservices/v1.0/face/persongroupperson.go
+++ b/services/cognitiveservices/v1.0/face/persongroupperson.go
@@ -101,7 +101,8 @@ func (client PersonGroupPersonClient) AddFaceFromStream(ctx context.Context, per
Constraints: []validation.Constraint{{Target: "personGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "personGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil}}},
{TargetValue: userData,
- Constraints: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "userData", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("face.PersonGroupPersonClient", "AddFaceFromStream", err.Error())
}
@@ -243,7 +244,8 @@ func (client PersonGroupPersonClient) AddFaceFromURL(ctx context.Context, person
Constraints: []validation.Constraint{{Target: "personGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "personGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil}}},
{TargetValue: userData,
- Constraints: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}},
+ Constraints: []validation.Constraint{{Target: "userData", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "userData", Name: validation.MaxLength, Rule: 1024, Chain: nil}}}}},
{TargetValue: imageURL,
Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("face.PersonGroupPersonClient", "AddFaceFromURL", err.Error())
diff --git a/services/cognitiveservices/v2.1/computervision/client.go b/services/cognitiveservices/v2.1/computervision/client.go
new file mode 100644
index 000000000000..6e2d02a2c27b
--- /dev/null
+++ b/services/cognitiveservices/v2.1/computervision/client.go
@@ -0,0 +1,2068 @@
+// Package computervision implements the Azure ARM Computervision service API version 2.1.
+//
+// The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example,
+// it can be used to determine if an image contains mature content, or it can be used to find all the faces in an
+// image. It also has other features like estimating dominant and accent colors, categorizing the content of images,
+// and describing an image with complete English sentences. Additionally, it can intelligently generate image
+// thumbnails for displaying large images effectively.
+package computervision
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "io"
+ "net/http"
+)
+
+// BaseClient is the base client for Computervision.
+type BaseClient struct {
+ autorest.Client
+ Endpoint string
+}
+
+// New creates an instance of the BaseClient client.
+func New(endpoint string) BaseClient {
+ return NewWithoutDefaults(endpoint)
+}
+
+// NewWithoutDefaults creates an instance of the BaseClient client.
+func NewWithoutDefaults(endpoint string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ Endpoint: endpoint,
+ }
+}
+
+// AnalyzeImage this operation extracts a rich set of visual features based on the image content.
+// Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there
+// is an optional parameter to allow you to choose which features to return. By default, image categories are returned
+// in the response.
+// A successful response will be returned in JSON. If the request failed, the response will contain an error code and a
+// message to help understand what went wrong.
+// Parameters:
+// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
+// visualFeatures - a string indicating what visual feature types to return. Multiple values should be
+// comma-separated. Valid visual feature types include: Categories - categorizes image content according to a
+// taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image
+// content. Description - describes the image content with a complete English sentence. Faces - detects if
+// faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart
+// or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.
+// Adult - detects if the image is pornographic in nature (depicts nudity or a sex act), or is gory (depicts
+// extreme violence or blood). Sexually suggestive content (aka racy content) is also detected. Objects -
+// detects various objects within an image, including the approximate location. The Objects argument is only
+// available in English. Brands - detects various brands within an image, including the approximate location.
+// The Brands argument is only available in English.
+// details - a string indicating which domain-specific details to return. Multiple values should be
+// comma-separated. Valid values include: Celebrities - identifies celebrities if detected in the
+// image, Landmarks - identifies notable landmarks in the image.
+// language - the desired language for output generation. If this parameter is not specified, the default value
+// is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese,
+// zh - Simplified Chinese.
+// descriptionExclude - turn off specified domain models when generating the description.
+func (client BaseClient) AnalyzeImage(ctx context.Context, imageURL ImageURL, visualFeatures []VisualFeatureTypes, details []Details, language string, descriptionExclude []DescriptionExclude) (result ImageAnalysis, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.AnalyzeImage")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: imageURL,
+ Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("computervision.BaseClient", "AnalyzeImage", err.Error())
+ }
+
+ req, err := client.AnalyzeImagePreparer(ctx, imageURL, visualFeatures, details, language, descriptionExclude)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImage", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.AnalyzeImageSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImage", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.AnalyzeImageResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImage", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// AnalyzeImagePreparer prepares the AnalyzeImage request.
+func (client BaseClient) AnalyzeImagePreparer(ctx context.Context, imageURL ImageURL, visualFeatures []VisualFeatureTypes, details []Details, language string, descriptionExclude []DescriptionExclude) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{}
+ if visualFeatures != nil && len(visualFeatures) > 0 {
+ queryParameters["visualFeatures"] = autorest.Encode("query", visualFeatures, ",")
+ }
+ if details != nil && len(details) > 0 {
+ queryParameters["details"] = autorest.Encode("query", details, ",")
+ }
+ if len(string(language)) > 0 {
+ queryParameters["language"] = autorest.Encode("query", language)
+ } else {
+ queryParameters["language"] = autorest.Encode("query", "en")
+ }
+ if descriptionExclude != nil && len(descriptionExclude) > 0 {
+ queryParameters["descriptionExclude"] = autorest.Encode("query", descriptionExclude, ",")
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/analyze"),
+ autorest.WithJSON(imageURL),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AnalyzeImageSender sends the AnalyzeImage request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) AnalyzeImageSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// AnalyzeImageResponder handles the response to the AnalyzeImage request. The method always
+// closes the http.Response Body.
+func (client BaseClient) AnalyzeImageResponder(resp *http.Response) (result ImageAnalysis, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
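
AnalyzeImage posts an ImageURL document to {Endpoint}/vision/v2.1/analyze with the requested feature set serialized into the query string. A hedged end-to-end sketch; the environment variable names, the sample image URL, and the chosen features are assumptions rather than anything defined by this package.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.1/computervision"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Assumed environment variables for a Computer Vision resource.
	endpoint := os.Getenv("COMPUTER_VISION_ENDPOINT")
	key := os.Getenv("COMPUTER_VISION_KEY")

	client := computervision.New(endpoint)
	client.Authorizer = autorest.NewCognitiveServicesAuthorizer(key)

	// The image URL and the requested feature set are illustrative.
	imageURL := computervision.ImageURL{URL: to.StringPtr("https://example.com/sample.jpg")}
	features := []computervision.VisualFeatureTypes{
		computervision.VisualFeatureTypesCategories,
		computervision.VisualFeatureTypesDescription,
	}

	// An empty language falls back to "en"; nil details and descriptionExclude
	// leave those query parameters out entirely.
	result, err := client.AnalyzeImage(context.Background(), imageURL, features, nil, "", nil)
	if err != nil {
		log.Fatal(err)
	}
	if result.Description != nil && result.Description.Captions != nil {
		for _, caption := range *result.Description.Captions {
			fmt.Printf("%s (confidence %.2f)\n", *caption.Text, *caption.Confidence)
		}
	}
}
```
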
+
+// AnalyzeImageByDomain this operation recognizes content within an image by applying a domain-specific model. The list
+// of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET
+// request. Currently, the API provides following domain-specific models: celebrities, landmarks.
+// Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.
+// A successful response will be returned in JSON.
+// If the request failed, the response will contain an error code and a message to help understand what went wrong.
+// Parameters:
+// model - the domain-specific content to recognize.
+// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
+// language - the desired language for output generation. If this parameter is not specified, the default value
+// is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese,
+// zh - Simplified Chinese.
+func (client BaseClient) AnalyzeImageByDomain(ctx context.Context, model string, imageURL ImageURL, language string) (result DomainModelResults, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.AnalyzeImageByDomain")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: imageURL,
+ Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("computervision.BaseClient", "AnalyzeImageByDomain", err.Error())
+ }
+
+ req, err := client.AnalyzeImageByDomainPreparer(ctx, model, imageURL, language)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImageByDomain", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.AnalyzeImageByDomainSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImageByDomain", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.AnalyzeImageByDomainResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImageByDomain", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// AnalyzeImageByDomainPreparer prepares the AnalyzeImageByDomain request.
+func (client BaseClient) AnalyzeImageByDomainPreparer(ctx context.Context, model string, imageURL ImageURL, language string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ pathParameters := map[string]interface{}{
+ "model": autorest.Encode("path", model),
+ }
+
+ queryParameters := map[string]interface{}{}
+ if len(string(language)) > 0 {
+ queryParameters["language"] = autorest.Encode("query", language)
+ } else {
+ queryParameters["language"] = autorest.Encode("query", "en")
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPathParameters("/models/{model}/analyze", pathParameters),
+ autorest.WithJSON(imageURL),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AnalyzeImageByDomainSender sends the AnalyzeImageByDomain request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) AnalyzeImageByDomainSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// AnalyzeImageByDomainResponder handles the response to the AnalyzeImageByDomain request. The method always
+// closes the http.Response Body.
+func (client BaseClient) AnalyzeImageByDomainResponder(resp *http.Response) (result DomainModelResults, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// AnalyzeImageByDomainInStream this operation recognizes content within an image by applying a domain-specific model.
+// The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models
+// GET request. Currently, the API provides following domain-specific models: celebrities, landmarks.
+// Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.
+// A successful response will be returned in JSON.
+// If the request failed, the response will contain an error code and a message to help understand what went wrong.
+// Parameters:
+// model - the domain-specific content to recognize.
+// imageParameter - an image stream.
+// language - the desired language for output generation. If this parameter is not specified, the default value
+// is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese,
+// zh - Simplified Chinese.
+func (client BaseClient) AnalyzeImageByDomainInStream(ctx context.Context, model string, imageParameter io.ReadCloser, language string) (result DomainModelResults, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.AnalyzeImageByDomainInStream")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.AnalyzeImageByDomainInStreamPreparer(ctx, model, imageParameter, language)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImageByDomainInStream", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.AnalyzeImageByDomainInStreamSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImageByDomainInStream", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.AnalyzeImageByDomainInStreamResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImageByDomainInStream", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// AnalyzeImageByDomainInStreamPreparer prepares the AnalyzeImageByDomainInStream request.
+func (client BaseClient) AnalyzeImageByDomainInStreamPreparer(ctx context.Context, model string, imageParameter io.ReadCloser, language string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ pathParameters := map[string]interface{}{
+ "model": autorest.Encode("path", model),
+ }
+
+ queryParameters := map[string]interface{}{}
+ if len(string(language)) > 0 {
+ queryParameters["language"] = autorest.Encode("query", language)
+ } else {
+ queryParameters["language"] = autorest.Encode("query", "en")
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/octet-stream"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPathParameters("/models/{model}/analyze", pathParameters),
+ autorest.WithFile(imageParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AnalyzeImageByDomainInStreamSender sends the AnalyzeImageByDomainInStream request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) AnalyzeImageByDomainInStreamSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// AnalyzeImageByDomainInStreamResponder handles the response to the AnalyzeImageByDomainInStream request. The method always
+// closes the http.Response Body.
+func (client BaseClient) AnalyzeImageByDomainInStreamResponder(resp *http.Response) (result DomainModelResults, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
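+
+// Example usage (illustrative sketch, not generated code): analyzing a local image
+// stream against the "landmarks" domain model. It assumes the package-level New
+// constructor and autorest.NewCognitiveServicesAuthorizer are available to the caller;
+// the endpoint, key, and file name below are placeholders.
+//
+//    client := computervision.New("https://<region>.api.cognitive.microsoft.com")
+//    client.Authorizer = autorest.NewCognitiveServicesAuthorizer("<subscription-key>")
+//    img, err := os.Open("landmark.jpg")
+//    if err != nil {
+//        // handle error
+//    }
+//    res, err := client.AnalyzeImageByDomainInStream(context.Background(), "landmarks", img, "en")
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = res // inspect the returned DomainModelResults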
+
+// AnalyzeImageInStream this operation extracts a rich set of visual features based on the image content.
+// Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there
+// is an optional parameter to allow you to choose which features to return. By default, image categories are returned
+// in the response.
+// A successful response will be returned in JSON. If the request failed, the response will contain an error code and a
+// message to help understand what went wrong.
+// Parameters:
+// imageParameter - an image stream.
+// visualFeatures - a string indicating what visual feature types to return. Multiple values should be
+// comma-separated. Valid visual feature types include: Categories - categorizes image content according to a
+// taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image
+// content. Description - describes the image content with a complete English sentence. Faces - detects if
+// faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart
+// or a line drawing. Color - determines the accent color, dominant color, and whether an image is black and white.
+// Adult - detects if the image is pornographic in nature (depicts nudity or a sex act), or is gory (depicts
+// extreme violence or blood). Sexually suggestive content (aka racy content) is also detected. Objects -
+// detects various objects within an image, including the approximate location. The Objects argument is only
+// available in English. Brands - detects various brands within an image, including the approximate location.
+// The Brands argument is only available in English.
+// details - a string indicating which domain-specific details to return. Multiple values should be
+// comma-separated. Valid details include: Celebrities - identifies celebrities if detected in the
+// image, Landmarks - identifies notable landmarks in the image.
+// language - the desired language for output generation. If this parameter is not specified, the default value
+// is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese,
+// zh - Simplified Chinese.
+// descriptionExclude - turn off specified domain models when generating the description.
+func (client BaseClient) AnalyzeImageInStream(ctx context.Context, imageParameter io.ReadCloser, visualFeatures []VisualFeatureTypes, details []Details, language string, descriptionExclude []DescriptionExclude) (result ImageAnalysis, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.AnalyzeImageInStream")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.AnalyzeImageInStreamPreparer(ctx, imageParameter, visualFeatures, details, language, descriptionExclude)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImageInStream", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.AnalyzeImageInStreamSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImageInStream", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.AnalyzeImageInStreamResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "AnalyzeImageInStream", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// AnalyzeImageInStreamPreparer prepares the AnalyzeImageInStream request.
+func (client BaseClient) AnalyzeImageInStreamPreparer(ctx context.Context, imageParameter io.ReadCloser, visualFeatures []VisualFeatureTypes, details []Details, language string, descriptionExclude []DescriptionExclude) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{}
+ if visualFeatures != nil && len(visualFeatures) > 0 {
+ queryParameters["visualFeatures"] = autorest.Encode("query", visualFeatures, ",")
+ }
+ if details != nil && len(details) > 0 {
+ queryParameters["details"] = autorest.Encode("query", details, ",")
+ }
+ if len(string(language)) > 0 {
+ queryParameters["language"] = autorest.Encode("query", language)
+ } else {
+ queryParameters["language"] = autorest.Encode("query", "en")
+ }
+ if descriptionExclude != nil && len(descriptionExclude) > 0 {
+ queryParameters["descriptionExclude"] = autorest.Encode("query", descriptionExclude, ",")
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/octet-stream"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/analyze"),
+ autorest.WithFile(imageParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AnalyzeImageInStreamSender sends the AnalyzeImageInStream request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) AnalyzeImageInStreamSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// AnalyzeImageInStreamResponder handles the response to the AnalyzeImageInStream request. The method always
+// closes the http.Response Body.
+func (client BaseClient) AnalyzeImageInStreamResponder(resp *http.Response) (result ImageAnalysis, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
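+
+// Example usage (illustrative sketch, not generated code): requesting specific visual
+// features for an uploaded image. The untyped string literals convert to this package's
+// VisualFeatureTypes string type; client construction follows the sketch shown for
+// AnalyzeImageByDomainInStream above, and the file name is a placeholder.
+//
+//    img, err := os.Open("photo.jpg")
+//    if err != nil {
+//        // handle error
+//    }
+//    features := []computervision.VisualFeatureTypes{"Categories", "Description", "Color"}
+//    analysis, err := client.AnalyzeImageInStream(context.Background(), img, features, nil, "en", nil)
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = analysis // inspect the returned ImageAnalysis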
+
+// BatchReadFile use this interface to submit a Read operation, employing the state-of-the-art Optical
+// Character Recognition (OCR) algorithms optimized for text-heavy documents. When you use the Read File interface, the
+// response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must
+// use for your 'GetReadOperationResult' operation to access OCR results.
+// Parameters:
+// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
+func (client BaseClient) BatchReadFile(ctx context.Context, imageURL ImageURL) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BatchReadFile")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: imageURL,
+ Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("computervision.BaseClient", "BatchReadFile", err.Error())
+ }
+
+ req, err := client.BatchReadFilePreparer(ctx, imageURL)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "BatchReadFile", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.BatchReadFileSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "BatchReadFile", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.BatchReadFileResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "BatchReadFile", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// BatchReadFilePreparer prepares the BatchReadFile request.
+func (client BaseClient) BatchReadFilePreparer(ctx context.Context, imageURL ImageURL) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/read/core/asyncBatchAnalyze"),
+ autorest.WithJSON(imageURL))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// BatchReadFileSender sends the BatchReadFile request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) BatchReadFileSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// BatchReadFileResponder handles the response to the BatchReadFile request. The method always
+// closes the http.Response Body.
+func (client BaseClient) BatchReadFileResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
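+
+// Example usage (illustrative sketch, not generated code): submitting a Read operation
+// for an image URL. Because autorest.Response embeds *http.Response, the
+// 'Operation-Location' header can be read directly from the result; the URL below is a
+// placeholder and client construction follows the earlier sketches.
+//
+//    docURL := "https://example.com/scanned-document.png"
+//    res, err := client.BatchReadFile(context.Background(), computervision.ImageURL{URL: &docURL})
+//    if err != nil {
+//        // handle error
+//    }
+//    opLocation := res.Header.Get("Operation-Location")
+//    _ = opLocation // poll this URL (or its trailing operation ID) via GetReadOperationResult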
+
+// BatchReadFileInStream use this interface to submit a Read Document operation, employing the
+// state-of-the-art Optical Character Recognition (OCR) algorithms optimized for text-heavy documents. When you use the
+// Read Document interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field
+// contains the URL that you must use for your 'GetReadOperationResult' operation to access OCR results.
+// Parameters:
+// imageParameter - an image stream.
+func (client BaseClient) BatchReadFileInStream(ctx context.Context, imageParameter io.ReadCloser) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BatchReadFileInStream")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.BatchReadFileInStreamPreparer(ctx, imageParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "BatchReadFileInStream", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.BatchReadFileInStreamSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "BatchReadFileInStream", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.BatchReadFileInStreamResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "BatchReadFileInStream", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// BatchReadFileInStreamPreparer prepares the BatchReadFileInStream request.
+func (client BaseClient) BatchReadFileInStreamPreparer(ctx context.Context, imageParameter io.ReadCloser) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/octet-stream"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/read/core/asyncBatchAnalyze"),
+ autorest.WithFile(imageParameter))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// BatchReadFileInStreamSender sends the BatchReadFileInStream request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) BatchReadFileInStreamSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// BatchReadFileInStreamResponder handles the response to the BatchReadFileInStream request. The method always
+// closes the http.Response Body.
+func (client BaseClient) BatchReadFileInStreamResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
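+
+// Example usage (illustrative sketch, not generated code): the stream variant of the
+// Read submission, for callers that hold the document locally rather than at a URL.
+// The file name is a placeholder; client construction follows the earlier sketches.
+//
+//    doc, err := os.Open("scanned-document.png")
+//    if err != nil {
+//        // handle error
+//    }
+//    res, err := client.BatchReadFileInStream(context.Background(), doc)
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = res.Header.Get("Operation-Location") // use with GetReadOperationResult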
+
+// DescribeImage this operation generates a description of an image in human readable language with complete sentences.
+// The description is based on a collection of content tags, which are also returned by the operation. More than one
+// description can be generated for each image. Descriptions are ordered by their confidence score. Descriptions may
+// include results from celebrity and landmark domain models, if applicable.
+// Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.
+// A successful response will be returned in JSON. If the request failed, the response will contain an error code and a
+// message to help understand what went wrong.
+// Parameters:
+// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
+// maxCandidates - maximum number of candidate descriptions to be returned. The default is 1.
+// language - the desired language for output generation. If this parameter is not specified, the default value
+// is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese,
+// zh - Simplified Chinese.
+// descriptionExclude - turn off specified domain models when generating the description.
+func (client BaseClient) DescribeImage(ctx context.Context, imageURL ImageURL, maxCandidates *int32, language string, descriptionExclude []DescriptionExclude) (result ImageDescription, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DescribeImage")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: imageURL,
+ Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("computervision.BaseClient", "DescribeImage", err.Error())
+ }
+
+ req, err := client.DescribeImagePreparer(ctx, imageURL, maxCandidates, language, descriptionExclude)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DescribeImage", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DescribeImageSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DescribeImage", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DescribeImageResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DescribeImage", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DescribeImagePreparer prepares the DescribeImage request.
+func (client BaseClient) DescribeImagePreparer(ctx context.Context, imageURL ImageURL, maxCandidates *int32, language string, descriptionExclude []DescriptionExclude) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{}
+ if maxCandidates != nil {
+ queryParameters["maxCandidates"] = autorest.Encode("query", *maxCandidates)
+ } else {
+ queryParameters["maxCandidates"] = autorest.Encode("query", 1)
+ }
+ if len(string(language)) > 0 {
+ queryParameters["language"] = autorest.Encode("query", language)
+ } else {
+ queryParameters["language"] = autorest.Encode("query", "en")
+ }
+ if descriptionExclude != nil && len(descriptionExclude) > 0 {
+ queryParameters["descriptionExclude"] = autorest.Encode("query", descriptionExclude, ",")
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/describe"),
+ autorest.WithJSON(imageURL),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DescribeImageSender sends the DescribeImage request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) DescribeImageSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DescribeImageResponder handles the response to the DescribeImage request. The method always
+// closes the http.Response Body.
+func (client BaseClient) DescribeImageResponder(resp *http.Response) (result ImageDescription, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
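+
+// Example usage (illustrative sketch, not generated code): asking for up to three
+// candidate descriptions of an image referenced by URL. The URL is a placeholder and
+// client construction follows the earlier sketches.
+//
+//    imgURL := "https://example.com/picture.jpg"
+//    maxCandidates := int32(3)
+//    desc, err := client.DescribeImage(context.Background(), computervision.ImageURL{URL: &imgURL}, &maxCandidates, "en", nil)
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = desc // inspect the returned ImageDescription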
+
+// DescribeImageInStream this operation generates a description of an image in human readable language with complete
+// sentences. The description is based on a collection of content tags, which are also returned by the operation. More
+// than one description can be generated for each image. Descriptions are ordered by their confidence score.
+// Descriptions may include results from celebrity and landmark domain models, if applicable.
+// Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.
+// A successful response will be returned in JSON. If the request failed, the response will contain an error code and a
+// message to help understand what went wrong.
+// Parameters:
+// imageParameter - an image stream.
+// maxCandidates - maximum number of candidate descriptions to be returned. The default is 1.
+// language - the desired language for output generation. If this parameter is not specified, the default value
+// is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese,
+// zh - Simplified Chinese.
+// descriptionExclude - turn off specified domain models when generating the description.
+func (client BaseClient) DescribeImageInStream(ctx context.Context, imageParameter io.ReadCloser, maxCandidates *int32, language string, descriptionExclude []DescriptionExclude) (result ImageDescription, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DescribeImageInStream")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DescribeImageInStreamPreparer(ctx, imageParameter, maxCandidates, language, descriptionExclude)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DescribeImageInStream", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DescribeImageInStreamSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DescribeImageInStream", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DescribeImageInStreamResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DescribeImageInStream", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DescribeImageInStreamPreparer prepares the DescribeImageInStream request.
+func (client BaseClient) DescribeImageInStreamPreparer(ctx context.Context, imageParameter io.ReadCloser, maxCandidates *int32, language string, descriptionExclude []DescriptionExclude) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{}
+ if maxCandidates != nil {
+ queryParameters["maxCandidates"] = autorest.Encode("query", *maxCandidates)
+ } else {
+ queryParameters["maxCandidates"] = autorest.Encode("query", 1)
+ }
+ if len(string(language)) > 0 {
+ queryParameters["language"] = autorest.Encode("query", language)
+ } else {
+ queryParameters["language"] = autorest.Encode("query", "en")
+ }
+ if descriptionExclude != nil && len(descriptionExclude) > 0 {
+ queryParameters["descriptionExclude"] = autorest.Encode("query", descriptionExclude, ",")
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/octet-stream"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/describe"),
+ autorest.WithFile(imageParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DescribeImageInStreamSender sends the DescribeImageInStream request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) DescribeImageInStreamSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DescribeImageInStreamResponder handles the response to the DescribeImageInStream request. The method always
+// closes the http.Response Body.
+func (client BaseClient) DescribeImageInStreamResponder(resp *http.Response) (result ImageDescription, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// DetectObjects performs object detection on the specified image.
+// Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.
+// A successful response will be returned in JSON. If the request failed, the response will contain an error code and a
+// message to help understand what went wrong.
+// Parameters:
+// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
+func (client BaseClient) DetectObjects(ctx context.Context, imageURL ImageURL) (result DetectResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DetectObjects")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: imageURL,
+ Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("computervision.BaseClient", "DetectObjects", err.Error())
+ }
+
+ req, err := client.DetectObjectsPreparer(ctx, imageURL)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DetectObjects", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DetectObjectsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DetectObjects", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DetectObjectsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DetectObjects", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DetectObjectsPreparer prepares the DetectObjects request.
+func (client BaseClient) DetectObjectsPreparer(ctx context.Context, imageURL ImageURL) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/detect"),
+ autorest.WithJSON(imageURL))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DetectObjectsSender sends the DetectObjects request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) DetectObjectsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DetectObjectsResponder handles the response to the DetectObjects request. The method always
+// closes the http.Response Body.
+func (client BaseClient) DetectObjectsResponder(resp *http.Response) (result DetectResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
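+
+// Example usage (illustrative sketch, not generated code): detecting objects in an
+// image referenced by URL. The URL is a placeholder and client construction follows
+// the earlier sketches.
+//
+//    imgURL := "https://example.com/street-scene.jpg"
+//    objects, err := client.DetectObjects(context.Background(), computervision.ImageURL{URL: &imgURL})
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = objects // inspect the returned DetectResult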
+
+// DetectObjectsInStream performs object detection on the specified image.
+// Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.
+// A successful response will be returned in JSON. If the request failed, the response will contain an error code and a
+// message to help understand what went wrong.
+// Parameters:
+// imageParameter - an image stream.
+func (client BaseClient) DetectObjectsInStream(ctx context.Context, imageParameter io.ReadCloser) (result DetectResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DetectObjectsInStream")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DetectObjectsInStreamPreparer(ctx, imageParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DetectObjectsInStream", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DetectObjectsInStreamSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DetectObjectsInStream", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DetectObjectsInStreamResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "DetectObjectsInStream", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DetectObjectsInStreamPreparer prepares the DetectObjectsInStream request.
+func (client BaseClient) DetectObjectsInStreamPreparer(ctx context.Context, imageParameter io.ReadCloser) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/octet-stream"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/detect"),
+ autorest.WithFile(imageParameter))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DetectObjectsInStreamSender sends the DetectObjectsInStream request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) DetectObjectsInStreamSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DetectObjectsInStreamResponder handles the response to the DetectObjectsInStream request. The method always
+// closes the http.Response Body.
+func (client BaseClient) DetectObjectsInStreamResponder(resp *http.Response) (result DetectResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GenerateThumbnail this operation generates a thumbnail image with the user-specified width and height. By default,
+// the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates
+// based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image.
+// A successful response contains the thumbnail image binary.
+// Upon failure, an error code and an error message are returned to help determine what went wrong. The error code
+// could be one of InvalidImageUrl,
+// InvalidImageFormat, InvalidImageSize, InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or
+// InternalServerError.
+// Parameters:
+// width - width of the thumbnail, in pixels. It must be between 1 and 1024. Recommended minimum of 50.
+// height - height of the thumbnail, in pixels. It must be between 1 and 1024. Recommended minimum of 50.
+// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
+// smartCropping - boolean flag for enabling smart cropping.
+func (client BaseClient) GenerateThumbnail(ctx context.Context, width int32, height int32, imageURL ImageURL, smartCropping *bool) (result ReadCloser, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GenerateThumbnail")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: width,
+ Constraints: []validation.Constraint{{Target: "width", Name: validation.InclusiveMaximum, Rule: int64(1024), Chain: nil},
+ {Target: "width", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}}},
+ {TargetValue: height,
+ Constraints: []validation.Constraint{{Target: "height", Name: validation.InclusiveMaximum, Rule: int64(1024), Chain: nil},
+ {Target: "height", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}}},
+ {TargetValue: imageURL,
+ Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("computervision.BaseClient", "GenerateThumbnail", err.Error())
+ }
+
+ req, err := client.GenerateThumbnailPreparer(ctx, width, height, imageURL, smartCropping)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GenerateThumbnail", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GenerateThumbnailSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GenerateThumbnail", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GenerateThumbnailResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GenerateThumbnail", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GenerateThumbnailPreparer prepares the GenerateThumbnail request.
+func (client BaseClient) GenerateThumbnailPreparer(ctx context.Context, width int32, height int32, imageURL ImageURL, smartCropping *bool) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{
+ "height": autorest.Encode("query", height),
+ "width": autorest.Encode("query", width),
+ }
+ if smartCropping != nil {
+ queryParameters["smartCropping"] = autorest.Encode("query", *smartCropping)
+ } else {
+ queryParameters["smartCropping"] = autorest.Encode("query", false)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/generateThumbnail"),
+ autorest.WithJSON(imageURL),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GenerateThumbnailSender sends the GenerateThumbnail request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GenerateThumbnailSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GenerateThumbnailResponder handles the response to the GenerateThumbnail request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GenerateThumbnailResponder(resp *http.Response) (result ReadCloser, err error) {
+ result.Value = &resp.Body
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK))
+ result.Response = autorest.Response{Response: resp}
+ return
+}
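+
+// Example usage (illustrative sketch, not generated code): generating a 100x100 smart-cropped
+// thumbnail and copying the binary response body to a local file. Because the responder stores
+// the raw body in result.Value, the caller is responsible for closing it; the URL and file name
+// are placeholders and client construction follows the earlier sketches.
+//
+//    imgURL := "https://example.com/picture.jpg"
+//    smart := true
+//    thumb, err := client.GenerateThumbnail(context.Background(), 100, 100, computervision.ImageURL{URL: &imgURL}, &smart)
+//    if err != nil {
+//        // handle error
+//    }
+//    defer (*thumb.Value).Close()
+//    out, err := os.Create("thumbnail.jpg")
+//    if err != nil {
+//        // handle error
+//    }
+//    defer out.Close()
+//    _, err = io.Copy(out, *thumb.Value)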
+
+// GenerateThumbnailInStream this operation generates a thumbnail image with the user-specified width and height. By
+// default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping
+// coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the
+// input image.
+// A successful response contains the thumbnail image binary.
+// Upon failure, an error code and an error message are returned to help determine what went wrong. The error code
+// could be one of InvalidImageUrl,
+// InvalidImageFormat, InvalidImageSize, InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or
+// InternalServerError.
+// Parameters:
+// width - width of the thumbnail, in pixels. It must be between 1 and 1024. Recommended minimum of 50.
+// height - height of the thumbnail, in pixels. It must be between 1 and 1024. Recommended minimum of 50.
+// imageParameter - an image stream.
+// smartCropping - boolean flag for enabling smart cropping.
+func (client BaseClient) GenerateThumbnailInStream(ctx context.Context, width int32, height int32, imageParameter io.ReadCloser, smartCropping *bool) (result ReadCloser, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GenerateThumbnailInStream")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: width,
+ Constraints: []validation.Constraint{{Target: "width", Name: validation.InclusiveMaximum, Rule: int64(1024), Chain: nil},
+ {Target: "width", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}}},
+ {TargetValue: height,
+ Constraints: []validation.Constraint{{Target: "height", Name: validation.InclusiveMaximum, Rule: int64(1024), Chain: nil},
+ {Target: "height", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("computervision.BaseClient", "GenerateThumbnailInStream", err.Error())
+ }
+
+ req, err := client.GenerateThumbnailInStreamPreparer(ctx, width, height, imageParameter, smartCropping)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GenerateThumbnailInStream", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GenerateThumbnailInStreamSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GenerateThumbnailInStream", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GenerateThumbnailInStreamResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GenerateThumbnailInStream", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GenerateThumbnailInStreamPreparer prepares the GenerateThumbnailInStream request.
+func (client BaseClient) GenerateThumbnailInStreamPreparer(ctx context.Context, width int32, height int32, imageParameter io.ReadCloser, smartCropping *bool) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{
+ "height": autorest.Encode("query", height),
+ "width": autorest.Encode("query", width),
+ }
+ if smartCropping != nil {
+ queryParameters["smartCropping"] = autorest.Encode("query", *smartCropping)
+ } else {
+ queryParameters["smartCropping"] = autorest.Encode("query", false)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/octet-stream"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/generateThumbnail"),
+ autorest.WithFile(imageParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GenerateThumbnailInStreamSender sends the GenerateThumbnailInStream request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GenerateThumbnailInStreamSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GenerateThumbnailInStreamResponder handles the response to the GenerateThumbnailInStream request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GenerateThumbnailInStreamResponder(resp *http.Response) (result ReadCloser, err error) {
+ result.Value = &resp.Body
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK))
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAreaOfInterest this operation returns a bounding box around the most important area of the image.
+// A successful response will be returned in JSON.
+// Upon failure, an error code and an error message are returned to help determine what went wrong. The error code
+// could be one of InvalidImageUrl,
+// InvalidImageFormat, InvalidImageSize, NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.
+// Parameters:
+// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
+func (client BaseClient) GetAreaOfInterest(ctx context.Context, imageURL ImageURL) (result AreaOfInterestResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetAreaOfInterest")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: imageURL,
+ Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("computervision.BaseClient", "GetAreaOfInterest", err.Error())
+ }
+
+ req, err := client.GetAreaOfInterestPreparer(ctx, imageURL)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetAreaOfInterest", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAreaOfInterestSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetAreaOfInterest", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAreaOfInterestResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetAreaOfInterest", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAreaOfInterestPreparer prepares the GetAreaOfInterest request.
+func (client BaseClient) GetAreaOfInterestPreparer(ctx context.Context, imageURL ImageURL) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/areaOfInterest"),
+ autorest.WithJSON(imageURL))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAreaOfInterestSender sends the GetAreaOfInterest request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetAreaOfInterestSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAreaOfInterestResponder handles the response to the GetAreaOfInterest request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetAreaOfInterestResponder(resp *http.Response) (result AreaOfInterestResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
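+
+// Example usage (illustrative sketch, not generated code): retrieving the bounding box of
+// the most important area of an image referenced by URL. The URL is a placeholder and
+// client construction follows the earlier sketches.
+//
+//    imgURL := "https://example.com/picture.jpg"
+//    aoi, err := client.GetAreaOfInterest(context.Background(), computervision.ImageURL{URL: &imgURL})
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = aoi // inspect the returned AreaOfInterestResult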
+
+// GetAreaOfInterestInStream this operation returns a bounding box around the most important area of the image.
+// A successful response will be returned in JSON.
+// Upon failure, an error code and an error message are returned to help determine what went wrong. The error code
+// could be one of InvalidImageUrl,
+// InvalidImageFormat, InvalidImageSize, NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.
+// Parameters:
+// imageParameter - an image stream.
+func (client BaseClient) GetAreaOfInterestInStream(ctx context.Context, imageParameter io.ReadCloser) (result AreaOfInterestResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetAreaOfInterestInStream")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAreaOfInterestInStreamPreparer(ctx, imageParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetAreaOfInterestInStream", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAreaOfInterestInStreamSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetAreaOfInterestInStream", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAreaOfInterestInStreamResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetAreaOfInterestInStream", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAreaOfInterestInStreamPreparer prepares the GetAreaOfInterestInStream request.
+func (client BaseClient) GetAreaOfInterestInStreamPreparer(ctx context.Context, imageParameter io.ReadCloser) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/octet-stream"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/areaOfInterest"),
+ autorest.WithFile(imageParameter))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAreaOfInterestInStreamSender sends the GetAreaOfInterestInStream request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetAreaOfInterestInStreamSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAreaOfInterestInStreamResponder handles the response to the GetAreaOfInterestInStream request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetAreaOfInterestInStreamResponder(resp *http.Response) (result AreaOfInterestResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetReadOperationResult this interface is used for getting the OCR results of a Read operation. The URL to this
+// interface should be retrieved from the 'Operation-Location' field returned by the 'Batch Read File' interface.
+// Parameters:
+// operationID - ID of the read operation returned in the response of the 'Batch Read File' interface.
+func (client BaseClient) GetReadOperationResult(ctx context.Context, operationID string) (result ReadOperationResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetReadOperationResult")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetReadOperationResultPreparer(ctx, operationID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetReadOperationResult", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetReadOperationResultSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetReadOperationResult", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetReadOperationResultResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetReadOperationResult", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetReadOperationResultPreparer prepares the GetReadOperationResult request.
+func (client BaseClient) GetReadOperationResultPreparer(ctx context.Context, operationID string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ pathParameters := map[string]interface{}{
+ "operationId": autorest.Encode("path", operationID),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPathParameters("/read/operations/{operationId}", pathParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetReadOperationResultSender sends the GetReadOperationResult request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetReadOperationResultSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetReadOperationResultResponder handles the response to the GetReadOperationResult request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetReadOperationResultResponder(resp *http.Response) (result ReadOperationResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
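+
+// Example usage (illustrative sketch, not generated code): polling for the results of a
+// previously submitted Read operation. The operation ID is the trailing path segment of the
+// 'Operation-Location' URL returned by BatchReadFile or BatchReadFileInStream; the polling
+// interval and loop bound below are arbitrary placeholders.
+//
+//    opLocation := res.Header.Get("Operation-Location") // from a BatchReadFile response
+//    operationID := opLocation[strings.LastIndex(opLocation, "/")+1:]
+//    for i := 0; i < 10; i++ {
+//        readResult, err := client.GetReadOperationResult(context.Background(), operationID)
+//        if err != nil {
+//            // handle error
+//        }
+//        _ = readResult // stop polling once the ReadOperationResult reports a terminal status
+//        time.Sleep(time.Second)
+//    }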
+
+// GetTextOperationResult this interface is used for getting the text operation result. The URL to this interface
+// should be retrieved from the 'Operation-Location' field returned by the 'Recognize Text' interface.
+// Parameters:
+// operationID - ID of the text operation returned in the response of the 'Recognize Text' interface.
+func (client BaseClient) GetTextOperationResult(ctx context.Context, operationID string) (result TextOperationResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetTextOperationResult")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetTextOperationResultPreparer(ctx, operationID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetTextOperationResult", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetTextOperationResultSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetTextOperationResult", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetTextOperationResultResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "GetTextOperationResult", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetTextOperationResultPreparer prepares the GetTextOperationResult request.
+func (client BaseClient) GetTextOperationResultPreparer(ctx context.Context, operationID string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ pathParameters := map[string]interface{}{
+ "operationId": autorest.Encode("path", operationID),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPathParameters("/textOperations/{operationId}", pathParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetTextOperationResultSender sends the GetTextOperationResult request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetTextOperationResultSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetTextOperationResultResponder handles the response to the GetTextOperationResult request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetTextOperationResultResponder(resp *http.Response) (result TextOperationResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
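+
+// Example usage (illustrative sketch, not generated code): fetching the result of a
+// Recognize Text operation by its operation ID, extracted from the 'Operation-Location'
+// header in the same way as shown for GetReadOperationResult above.
+//
+//    textResult, err := client.GetTextOperationResult(context.Background(), operationID)
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = textResult // inspect the returned TextOperationResult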
+
+// ListModels this operation returns the list of domain-specific models that are supported by the Computer Vision API.
+// Currently, the API supports the following domain-specific models: celebrity recognizer and landmark recognizer.
+// A successful response will be returned in JSON. If the request failed, the response will contain an error code and a
+// message to help understand what went wrong.
+func (client BaseClient) ListModels(ctx context.Context) (result ListModelsResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.ListModels")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListModelsPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "ListModels", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListModelsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "ListModels", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListModelsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "ListModels", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListModelsPreparer prepares the ListModels request.
+func (client BaseClient) ListModelsPreparer(ctx context.Context) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/models"))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListModelsSender sends the ListModels request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) ListModelsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListModelsResponder handles the response to the ListModels request. The method always
+// closes the http.Response Body.
+func (client BaseClient) ListModelsResponder(resp *http.Response) (result ListModelsResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
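+
+// Example usage (illustrative sketch, not generated code): listing the domain-specific
+// models supported by the endpoint. Client construction follows the earlier sketches.
+//
+//    models, err := client.ListModels(context.Background())
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = models // inspect the returned ListModelsResult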
+
+// RecognizePrintedText optical Character Recognition (OCR) detects text in an image and extracts the recognized
+// characters into a machine-usable character stream.
+// Upon success, the OCR results will be returned.
+// Upon failure, the error code together with an error message will be returned. The error code can be one of
+// InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or
+// InternalServerError.
+// Parameters:
+// detectOrientation - whether to detect the text orientation in the image. With detectOrientation=true the OCR
+// service tries to detect the image orientation and correct it before further processing (e.g. if it's
+// upside-down).
+// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
+// language - the BCP-47 language code of the text to be detected in the image. The default value is 'unk'.
+func (client BaseClient) RecognizePrintedText(ctx context.Context, detectOrientation bool, imageURL ImageURL, language OcrLanguages) (result OcrResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecognizePrintedText")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: imageURL,
+ Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("computervision.BaseClient", "RecognizePrintedText", err.Error())
+ }
+
+ req, err := client.RecognizePrintedTextPreparer(ctx, detectOrientation, imageURL, language)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizePrintedText", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RecognizePrintedTextSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizePrintedText", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RecognizePrintedTextResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizePrintedText", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// RecognizePrintedTextPreparer prepares the RecognizePrintedText request.
+func (client BaseClient) RecognizePrintedTextPreparer(ctx context.Context, detectOrientation bool, imageURL ImageURL, language OcrLanguages) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{
+ "detectOrientation": autorest.Encode("query", detectOrientation),
+ }
+ if len(string(language)) > 0 {
+ queryParameters["language"] = autorest.Encode("query", language)
+ } else {
+ queryParameters["language"] = autorest.Encode("query", "unk")
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/ocr"),
+ autorest.WithJSON(imageURL),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RecognizePrintedTextSender sends the RecognizePrintedText request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) RecognizePrintedTextSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// RecognizePrintedTextResponder handles the response to the RecognizePrintedText request. The method always
+// closes the http.Response Body.
+func (client BaseClient) RecognizePrintedTextResponder(resp *http.Response) (result OcrResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
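
A minimal sketch of calling RecognizePrintedText and walking the nested Regions/Lines/Words of the OcrResult; the client is assumed to be configured as in the ListModels sketch above, the helper name and image URL are illustrative, and to.StringPtr comes from github.com/Azure/go-autorest/autorest/to:

    func printPrintedText(ctx context.Context, client computervision.BaseClient) error {
        img := computervision.ImageURL{URL: to.StringPtr("https://example.com/receipt.png")} // placeholder URL
        ocr, err := client.RecognizePrintedText(ctx, true, img, computervision.En)
        if err != nil {
            return err
        }
        if ocr.Regions == nil {
            return nil
        }
        for _, region := range *ocr.Regions {
            if region.Lines == nil {
                continue
            }
            for _, line := range *region.Lines {
                if line.Words == nil {
                    continue
                }
                for _, word := range *line.Words {
                    fmt.Print(*word.Text, " ")
                }
                fmt.Println()
            }
        }
        return nil
    }
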
+
+// RecognizePrintedTextInStream optical Character Recognition (OCR) detects text in an image and extracts the
+// recognized characters into a machine-usable character stream.
+// Upon success, the OCR results will be returned.
+// Upon failure, the error code together with an error message will be returned. The error code can be one of
+// InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or
+// InternalServerError.
+// Parameters:
+// detectOrientation - whether to detect the text orientation in the image. With detectOrientation=true the OCR
+// service tries to detect the image orientation and correct it before further processing (e.g. if it's
+// upside-down).
+// imageParameter - an image stream.
+// language - the BCP-47 language code of the text to be detected in the image. The default value is 'unk'.
+func (client BaseClient) RecognizePrintedTextInStream(ctx context.Context, detectOrientation bool, imageParameter io.ReadCloser, language OcrLanguages) (result OcrResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecognizePrintedTextInStream")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.RecognizePrintedTextInStreamPreparer(ctx, detectOrientation, imageParameter, language)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizePrintedTextInStream", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RecognizePrintedTextInStreamSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizePrintedTextInStream", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RecognizePrintedTextInStreamResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizePrintedTextInStream", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// RecognizePrintedTextInStreamPreparer prepares the RecognizePrintedTextInStream request.
+func (client BaseClient) RecognizePrintedTextInStreamPreparer(ctx context.Context, detectOrientation bool, imageParameter io.ReadCloser, language OcrLanguages) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{
+ "detectOrientation": autorest.Encode("query", detectOrientation),
+ }
+ if len(string(language)) > 0 {
+ queryParameters["language"] = autorest.Encode("query", language)
+ } else {
+ queryParameters["language"] = autorest.Encode("query", "unk")
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/octet-stream"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/ocr"),
+ autorest.WithFile(imageParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RecognizePrintedTextInStreamSender sends the RecognizePrintedTextInStream request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) RecognizePrintedTextInStreamSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// RecognizePrintedTextInStreamResponder handles the response to the RecognizePrintedTextInStream request. The method always
+// closes the http.Response Body.
+func (client BaseClient) RecognizePrintedTextInStreamResponder(resp *http.Response) (result OcrResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
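
The stream variant accepts any io.ReadCloser, so a local file can be sent directly; a sketch under the same assumptions as above (the helper name and the use of os.Open are illustrative):

    func ocrFromFile(ctx context.Context, client computervision.BaseClient, path string) (computervision.OcrResult, error) {
        f, err := os.Open(path)
        if err != nil {
            return computervision.OcrResult{}, err
        }
        // The *os.File is used as the request body via autorest.WithFile above;
        // language Unk asks the service to auto-detect the language.
        return client.RecognizePrintedTextInStream(ctx, true, f, computervision.Unk)
    }
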
+
+// RecognizeText runs the Recognize Text operation. When you use the Recognize Text interface, the response contains a field
+// called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get
+// Recognize Text Operation Result operation.
+// Parameters:
+// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
+// mode - type of text to recognize.
+func (client BaseClient) RecognizeText(ctx context.Context, imageURL ImageURL, mode TextRecognitionMode) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecognizeText")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: imageURL,
+ Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("computervision.BaseClient", "RecognizeText", err.Error())
+ }
+
+ req, err := client.RecognizeTextPreparer(ctx, imageURL, mode)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizeText", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RecognizeTextSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizeText", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RecognizeTextResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizeText", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// RecognizeTextPreparer prepares the RecognizeText request.
+func (client BaseClient) RecognizeTextPreparer(ctx context.Context, imageURL ImageURL, mode TextRecognitionMode) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{
+ "mode": autorest.Encode("query", mode),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/recognizeText"),
+ autorest.WithJSON(imageURL),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RecognizeTextSender sends the RecognizeText request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) RecognizeTextSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// RecognizeTextResponder handles the response to the RecognizeText request. The method always
+// closes the http.Response Body.
+func (client BaseClient) RecognizeTextResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
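
Since RecognizeText only returns 202 Accepted with an Operation-Location header, a caller has to extract the operation ID and poll GetTextOperationResult; in this sketch the ID is taken as the final path segment of that URL, which is how the service typically formats it (the helper name is illustrative, strings is from the standard library):

    func startRecognizeText(ctx context.Context, client computervision.BaseClient, imageURL string) (string, error) {
        img := computervision.ImageURL{URL: &imageURL}
        resp, err := client.RecognizeText(ctx, img, computervision.Printed)
        if err != nil {
            return "", err
        }
        // The result is not in the body; it is polled via the URL in Operation-Location.
        opLocation := resp.Header.Get("Operation-Location")
        // GetTextOperationResult expects only the trailing operation ID.
        return opLocation[strings.LastIndex(opLocation, "/")+1:], nil
    }
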
+
+// RecognizeTextInStream runs the Recognize Text operation. When you use the Recognize Text interface, the response contains a
+// field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get
+// Recognize Text Operation Result operation.
+// Parameters:
+// imageParameter - an image stream.
+// mode - type of text to recognize.
+func (client BaseClient) RecognizeTextInStream(ctx context.Context, imageParameter io.ReadCloser, mode TextRecognitionMode) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecognizeTextInStream")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.RecognizeTextInStreamPreparer(ctx, imageParameter, mode)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizeTextInStream", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RecognizeTextInStreamSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizeTextInStream", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RecognizeTextInStreamResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "RecognizeTextInStream", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// RecognizeTextInStreamPreparer prepares the RecognizeTextInStream request.
+func (client BaseClient) RecognizeTextInStreamPreparer(ctx context.Context, imageParameter io.ReadCloser, mode TextRecognitionMode) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{
+ "mode": autorest.Encode("query", mode),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/octet-stream"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/recognizeText"),
+ autorest.WithFile(imageParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RecognizeTextInStreamSender sends the RecognizeTextInStream request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) RecognizeTextInStreamSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// RecognizeTextInStreamResponder handles the response to the RecognizeTextInStream request. The method always
+// closes the http.Response Body.
+func (client BaseClient) RecognizeTextInStreamResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// TagImage this operation generates a list of words, or tags, that are relevant to the content of the supplied image.
+// The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike
+// categories, tags are not organized according to a hierarchical classification system, but correspond to image
+// content. Tags may contain hints to avoid ambiguity or provide context, for example the tag "ascomycete" may be
+// accompanied by the hint "fungus".
+// Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.
+// A successful response will be returned in JSON. If the request failed, the response will contain an error code and a
+// message to help understand what went wrong.
+// Parameters:
+// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
+// language - the desired language for output generation. If this parameter is not specified, the default value
+// is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese,
+// zh - Simplified Chinese.
+func (client BaseClient) TagImage(ctx context.Context, imageURL ImageURL, language string) (result TagResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.TagImage")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: imageURL,
+ Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("computervision.BaseClient", "TagImage", err.Error())
+ }
+
+ req, err := client.TagImagePreparer(ctx, imageURL, language)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "TagImage", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.TagImageSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "TagImage", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.TagImageResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "TagImage", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// TagImagePreparer prepares the TagImage request.
+func (client BaseClient) TagImagePreparer(ctx context.Context, imageURL ImageURL, language string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{}
+ if len(string(language)) > 0 {
+ queryParameters["language"] = autorest.Encode("query", language)
+ } else {
+ queryParameters["language"] = autorest.Encode("query", "en")
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/tag"),
+ autorest.WithJSON(imageURL),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// TagImageSender sends the TagImage request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) TagImageSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// TagImageResponder handles the response to the TagImage request. The method always
+// closes the http.Response Body.
+func (client BaseClient) TagImageResponder(resp *http.Response) (result TagResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
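
A sketch of TagImage against a placeholder URL, relying on the empty-language fallback handled by the preparer above (client configured as in the earlier sketches):

    func printTags(ctx context.Context, client computervision.BaseClient) error {
        img := computervision.ImageURL{URL: to.StringPtr("https://example.com/house.jpg")} // placeholder URL
        res, err := client.TagImage(ctx, img, "") // empty language falls back to "en" in the preparer
        if err != nil {
            return err
        }
        if res.Tags == nil {
            return nil
        }
        for _, t := range *res.Tags {
            if t.Name != nil && t.Confidence != nil {
                fmt.Printf("%s (%.2f)\n", *t.Name, *t.Confidence)
            }
        }
        return nil
    }
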
+
+// TagImageInStream this operation generates a list of words, or tags, that are relevant to the content of the supplied
+// image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images.
+// Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image
+// content. Tags may contain hints to avoid ambiguity or provide context, for example the tag "ascomycete" may be
+// accompanied by the hint "fungus".
+// Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.
+// A successful response will be returned in JSON. If the request failed, the response will contain an error code and a
+// message to help understand what went wrong.
+// Parameters:
+// imageParameter - an image stream.
+// language - the desired language for output generation. If this parameter is not specified, the default value
+// is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese,
+// zh - Simplified Chinese.
+func (client BaseClient) TagImageInStream(ctx context.Context, imageParameter io.ReadCloser, language string) (result TagResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.TagImageInStream")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.TagImageInStreamPreparer(ctx, imageParameter, language)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "TagImageInStream", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.TagImageInStreamSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "TagImageInStream", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.TagImageInStreamResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "computervision.BaseClient", "TagImageInStream", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// TagImageInStreamPreparer prepares the TagImageInStream request.
+func (client BaseClient) TagImageInStreamPreparer(ctx context.Context, imageParameter io.ReadCloser, language string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "Endpoint": client.Endpoint,
+ }
+
+ queryParameters := map[string]interface{}{}
+ if len(string(language)) > 0 {
+ queryParameters["language"] = autorest.Encode("query", language)
+ } else {
+ queryParameters["language"] = autorest.Encode("query", "en")
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/octet-stream"),
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{Endpoint}/vision/v2.1", urlParameters),
+ autorest.WithPath("/tag"),
+ autorest.WithFile(imageParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// TagImageInStreamSender sends the TagImageInStream request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) TagImageInStreamSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// TagImageInStreamResponder handles the response to the TagImageInStream request. The method always
+// closes the http.Response Body.
+func (client BaseClient) TagImageInStreamResponder(resp *http.Response) (result TagResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
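
For local images, the stream variant again takes the io.ReadCloser directly; a short sketch (helper name illustrative):

    func tagLocalImage(ctx context.Context, client computervision.BaseClient, path string) (computervision.TagResult, error) {
        f, err := os.Open(path)
        if err != nil {
            return computervision.TagResult{}, err
        }
        // Sent as application/octet-stream; "en" is also the service-side default.
        return client.TagImageInStream(ctx, f, "en")
    }
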
diff --git a/services/cognitiveservices/v2.1/computervision/computervisionapi/interfaces.go b/services/cognitiveservices/v2.1/computervision/computervisionapi/interfaces.go
new file mode 100644
index 000000000000..d649321e94d9
--- /dev/null
+++ b/services/cognitiveservices/v2.1/computervision/computervisionapi/interfaces.go
@@ -0,0 +1,54 @@
+package computervisionapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.1/computervision"
+ "github.com/Azure/go-autorest/autorest"
+ "io"
+)
+
+// BaseClientAPI contains the set of methods on the BaseClient type.
+type BaseClientAPI interface {
+ AnalyzeImage(ctx context.Context, imageURL computervision.ImageURL, visualFeatures []computervision.VisualFeatureTypes, details []computervision.Details, language string, descriptionExclude []computervision.DescriptionExclude) (result computervision.ImageAnalysis, err error)
+ AnalyzeImageByDomain(ctx context.Context, model string, imageURL computervision.ImageURL, language string) (result computervision.DomainModelResults, err error)
+ AnalyzeImageByDomainInStream(ctx context.Context, model string, imageParameter io.ReadCloser, language string) (result computervision.DomainModelResults, err error)
+ AnalyzeImageInStream(ctx context.Context, imageParameter io.ReadCloser, visualFeatures []computervision.VisualFeatureTypes, details []computervision.Details, language string, descriptionExclude []computervision.DescriptionExclude) (result computervision.ImageAnalysis, err error)
+ BatchReadFile(ctx context.Context, imageURL computervision.ImageURL) (result autorest.Response, err error)
+ BatchReadFileInStream(ctx context.Context, imageParameter io.ReadCloser) (result autorest.Response, err error)
+ DescribeImage(ctx context.Context, imageURL computervision.ImageURL, maxCandidates *int32, language string, descriptionExclude []computervision.DescriptionExclude) (result computervision.ImageDescription, err error)
+ DescribeImageInStream(ctx context.Context, imageParameter io.ReadCloser, maxCandidates *int32, language string, descriptionExclude []computervision.DescriptionExclude) (result computervision.ImageDescription, err error)
+ DetectObjects(ctx context.Context, imageURL computervision.ImageURL) (result computervision.DetectResult, err error)
+ DetectObjectsInStream(ctx context.Context, imageParameter io.ReadCloser) (result computervision.DetectResult, err error)
+ GenerateThumbnail(ctx context.Context, width int32, height int32, imageURL computervision.ImageURL, smartCropping *bool) (result computervision.ReadCloser, err error)
+ GenerateThumbnailInStream(ctx context.Context, width int32, height int32, imageParameter io.ReadCloser, smartCropping *bool) (result computervision.ReadCloser, err error)
+ GetAreaOfInterest(ctx context.Context, imageURL computervision.ImageURL) (result computervision.AreaOfInterestResult, err error)
+ GetAreaOfInterestInStream(ctx context.Context, imageParameter io.ReadCloser) (result computervision.AreaOfInterestResult, err error)
+ GetReadOperationResult(ctx context.Context, operationID string) (result computervision.ReadOperationResult, err error)
+ GetTextOperationResult(ctx context.Context, operationID string) (result computervision.TextOperationResult, err error)
+ ListModels(ctx context.Context) (result computervision.ListModelsResult, err error)
+ RecognizePrintedText(ctx context.Context, detectOrientation bool, imageURL computervision.ImageURL, language computervision.OcrLanguages) (result computervision.OcrResult, err error)
+ RecognizePrintedTextInStream(ctx context.Context, detectOrientation bool, imageParameter io.ReadCloser, language computervision.OcrLanguages) (result computervision.OcrResult, err error)
+ RecognizeText(ctx context.Context, imageURL computervision.ImageURL, mode computervision.TextRecognitionMode) (result autorest.Response, err error)
+ RecognizeTextInStream(ctx context.Context, imageParameter io.ReadCloser, mode computervision.TextRecognitionMode) (result autorest.Response, err error)
+ TagImage(ctx context.Context, imageURL computervision.ImageURL, language string) (result computervision.TagResult, err error)
+ TagImageInStream(ctx context.Context, imageParameter io.ReadCloser, language string) (result computervision.TagResult, err error)
+}
+
+var _ BaseClientAPI = (*computervision.BaseClient)(nil)
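
Because *computervision.BaseClient satisfies BaseClientAPI (per the assertion above), callers can depend on the interface and substitute a test double; a sketch with a hypothetical fakeVision type that overrides only the method under test:

    // fakeVision embeds the interface so it satisfies BaseClientAPI; calling any
    // method other than the overridden one would panic, which is fine in a focused test.
    type fakeVision struct {
        computervisionapi.BaseClientAPI
    }

    func (fakeVision) ListModels(ctx context.Context) (computervision.ListModelsResult, error) {
        name := "celebrities"
        return computervision.ListModelsResult{
            ModelsProperty: &[]computervision.ModelDescription{{Name: &name}},
        }, nil
    }

    func modelNames(ctx context.Context, api computervisionapi.BaseClientAPI) ([]string, error) {
        res, err := api.ListModels(ctx)
        if err != nil || res.ModelsProperty == nil {
            return nil, err
        }
        var names []string
        for _, m := range *res.ModelsProperty {
            if m.Name != nil {
                names = append(names, *m.Name)
            }
        }
        return names, nil
    }
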
diff --git a/services/cognitiveservices/v2.1/computervision/models.go b/services/cognitiveservices/v2.1/computervision/models.go
new file mode 100644
index 000000000000..3a28f9f9d5eb
--- /dev/null
+++ b/services/cognitiveservices/v2.1/computervision/models.go
@@ -0,0 +1,690 @@
+package computervision
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "io"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v2.1/computervision"
+
+// DescriptionExclude enumerates the values for description exclude.
+type DescriptionExclude string
+
+const (
+ // Celebrities ...
+ Celebrities DescriptionExclude = "Celebrities"
+ // Landmarks ...
+ Landmarks DescriptionExclude = "Landmarks"
+)
+
+// PossibleDescriptionExcludeValues returns an array of possible values for the DescriptionExclude const type.
+func PossibleDescriptionExcludeValues() []DescriptionExclude {
+ return []DescriptionExclude{Celebrities, Landmarks}
+}
+
+// Details enumerates the values for details.
+type Details string
+
+const (
+ // DetailsCelebrities ...
+ DetailsCelebrities Details = "Celebrities"
+ // DetailsLandmarks ...
+ DetailsLandmarks Details = "Landmarks"
+)
+
+// PossibleDetailsValues returns an array of possible values for the Details const type.
+func PossibleDetailsValues() []Details {
+ return []Details{DetailsCelebrities, DetailsLandmarks}
+}
+
+// Gender enumerates the values for gender.
+type Gender string
+
+const (
+ // Female ...
+ Female Gender = "Female"
+ // Male ...
+ Male Gender = "Male"
+)
+
+// PossibleGenderValues returns an array of possible values for the Gender const type.
+func PossibleGenderValues() []Gender {
+ return []Gender{Female, Male}
+}
+
+// OcrLanguages enumerates the values for ocr languages.
+type OcrLanguages string
+
+const (
+ // Ar ...
+ Ar OcrLanguages = "ar"
+ // Cs ...
+ Cs OcrLanguages = "cs"
+ // Da ...
+ Da OcrLanguages = "da"
+ // De ...
+ De OcrLanguages = "de"
+ // El ...
+ El OcrLanguages = "el"
+ // En ...
+ En OcrLanguages = "en"
+ // Es ...
+ Es OcrLanguages = "es"
+ // Fi ...
+ Fi OcrLanguages = "fi"
+ // Fr ...
+ Fr OcrLanguages = "fr"
+ // Hu ...
+ Hu OcrLanguages = "hu"
+ // It ...
+ It OcrLanguages = "it"
+ // Ja ...
+ Ja OcrLanguages = "ja"
+ // Ko ...
+ Ko OcrLanguages = "ko"
+ // Nb ...
+ Nb OcrLanguages = "nb"
+ // Nl ...
+ Nl OcrLanguages = "nl"
+ // Pl ...
+ Pl OcrLanguages = "pl"
+ // Pt ...
+ Pt OcrLanguages = "pt"
+ // Ro ...
+ Ro OcrLanguages = "ro"
+ // Ru ...
+ Ru OcrLanguages = "ru"
+ // Sk ...
+ Sk OcrLanguages = "sk"
+ // SrCyrl ...
+ SrCyrl OcrLanguages = "sr-Cyrl"
+ // SrLatn ...
+ SrLatn OcrLanguages = "sr-Latn"
+ // Sv ...
+ Sv OcrLanguages = "sv"
+ // Tr ...
+ Tr OcrLanguages = "tr"
+ // Unk ...
+ Unk OcrLanguages = "unk"
+ // ZhHans ...
+ ZhHans OcrLanguages = "zh-Hans"
+ // ZhHant ...
+ ZhHant OcrLanguages = "zh-Hant"
+)
+
+// PossibleOcrLanguagesValues returns an array of possible values for the OcrLanguages const type.
+func PossibleOcrLanguagesValues() []OcrLanguages {
+ return []OcrLanguages{Ar, Cs, Da, De, El, En, Es, Fi, Fr, Hu, It, Ja, Ko, Nb, Nl, Pl, Pt, Ro, Ru, Sk, SrCyrl, SrLatn, Sv, Tr, Unk, ZhHans, ZhHant}
+}
+
+// TextOperationStatusCodes enumerates the values for text operation status codes.
+type TextOperationStatusCodes string
+
+const (
+ // Failed ...
+ Failed TextOperationStatusCodes = "Failed"
+ // NotStarted ...
+ NotStarted TextOperationStatusCodes = "NotStarted"
+ // Running ...
+ Running TextOperationStatusCodes = "Running"
+ // Succeeded ...
+ Succeeded TextOperationStatusCodes = "Succeeded"
+)
+
+// PossibleTextOperationStatusCodesValues returns an array of possible values for the TextOperationStatusCodes const type.
+func PossibleTextOperationStatusCodesValues() []TextOperationStatusCodes {
+ return []TextOperationStatusCodes{Failed, NotStarted, Running, Succeeded}
+}
+
+// TextRecognitionMode enumerates the values for text recognition mode.
+type TextRecognitionMode string
+
+const (
+ // Handwritten ...
+ Handwritten TextRecognitionMode = "Handwritten"
+ // Printed ...
+ Printed TextRecognitionMode = "Printed"
+)
+
+// PossibleTextRecognitionModeValues returns an array of possible values for the TextRecognitionMode const type.
+func PossibleTextRecognitionModeValues() []TextRecognitionMode {
+ return []TextRecognitionMode{Handwritten, Printed}
+}
+
+// TextRecognitionResultConfidenceClass enumerates the values for text recognition result confidence class.
+type TextRecognitionResultConfidenceClass string
+
+const (
+ // High ...
+ High TextRecognitionResultConfidenceClass = "High"
+ // Low ...
+ Low TextRecognitionResultConfidenceClass = "Low"
+)
+
+// PossibleTextRecognitionResultConfidenceClassValues returns an array of possible values for the TextRecognitionResultConfidenceClass const type.
+func PossibleTextRecognitionResultConfidenceClassValues() []TextRecognitionResultConfidenceClass {
+ return []TextRecognitionResultConfidenceClass{High, Low}
+}
+
+// TextRecognitionResultDimensionUnit enumerates the values for text recognition result dimension unit.
+type TextRecognitionResultDimensionUnit string
+
+const (
+ // Inch ...
+ Inch TextRecognitionResultDimensionUnit = "inch"
+ // Pixel ...
+ Pixel TextRecognitionResultDimensionUnit = "pixel"
+)
+
+// PossibleTextRecognitionResultDimensionUnitValues returns an array of possible values for the TextRecognitionResultDimensionUnit const type.
+func PossibleTextRecognitionResultDimensionUnitValues() []TextRecognitionResultDimensionUnit {
+ return []TextRecognitionResultDimensionUnit{Inch, Pixel}
+}
+
+// VisualFeatureTypes enumerates the values for visual feature types.
+type VisualFeatureTypes string
+
+const (
+ // VisualFeatureTypesAdult ...
+ VisualFeatureTypesAdult VisualFeatureTypes = "Adult"
+ // VisualFeatureTypesBrands ...
+ VisualFeatureTypesBrands VisualFeatureTypes = "Brands"
+ // VisualFeatureTypesCategories ...
+ VisualFeatureTypesCategories VisualFeatureTypes = "Categories"
+ // VisualFeatureTypesColor ...
+ VisualFeatureTypesColor VisualFeatureTypes = "Color"
+ // VisualFeatureTypesDescription ...
+ VisualFeatureTypesDescription VisualFeatureTypes = "Description"
+ // VisualFeatureTypesFaces ...
+ VisualFeatureTypesFaces VisualFeatureTypes = "Faces"
+ // VisualFeatureTypesImageType ...
+ VisualFeatureTypesImageType VisualFeatureTypes = "ImageType"
+ // VisualFeatureTypesObjects ...
+ VisualFeatureTypesObjects VisualFeatureTypes = "Objects"
+ // VisualFeatureTypesTags ...
+ VisualFeatureTypesTags VisualFeatureTypes = "Tags"
+)
+
+// PossibleVisualFeatureTypesValues returns an array of possible values for the VisualFeatureTypes const type.
+func PossibleVisualFeatureTypesValues() []VisualFeatureTypes {
+ return []VisualFeatureTypes{VisualFeatureTypesAdult, VisualFeatureTypesBrands, VisualFeatureTypesCategories, VisualFeatureTypesColor, VisualFeatureTypesDescription, VisualFeatureTypesFaces, VisualFeatureTypesImageType, VisualFeatureTypesObjects, VisualFeatureTypesTags}
+}
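
These constants are what the visualFeatures slice of AnalyzeImage (see the computervisionapi interface above) expects; a small sketch with a placeholder URL and a client configured as in the earlier examples:

    func analyze(ctx context.Context, client computervision.BaseClient) (computervision.ImageAnalysis, error) {
        img := computervision.ImageURL{URL: to.StringPtr("https://example.com/photo.jpg")} // placeholder URL
        features := []computervision.VisualFeatureTypes{
            computervision.VisualFeatureTypesDescription,
            computervision.VisualFeatureTypesTags,
            computervision.VisualFeatureTypesAdult,
        }
        // No domain-specific details, default language, nothing excluded from the description.
        return client.AnalyzeImage(ctx, img, features, nil, "", nil)
    }
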
+
+// AdultInfo an object describing whether the image contains adult-oriented content and/or is racy.
+type AdultInfo struct {
+ // IsAdultContent - A value indicating if the image contains adult-oriented content.
+ IsAdultContent *bool `json:"isAdultContent,omitempty"`
+ // IsRacyContent - A value indicating if the image is racy.
+ IsRacyContent *bool `json:"isRacyContent,omitempty"`
+ // IsGoryContent - A value indicating if the image is gory.
+ IsGoryContent *bool `json:"isGoryContent,omitempty"`
+ // AdultScore - Score from 0 to 1 that indicates how much the content is considered adult-oriented within the image.
+ AdultScore *float64 `json:"adultScore,omitempty"`
+ // RacyScore - Score from 0 to 1 that indicates how suggestive the image is.
+ RacyScore *float64 `json:"racyScore,omitempty"`
+ // GoreScore - Score from 0 to 1 that indicates how gory the image is.
+ GoreScore *float64 `json:"goreScore,omitempty"`
+}
+
+// AreaOfInterestResult result of AreaOfInterest operation.
+type AreaOfInterestResult struct {
+ autorest.Response `json:"-"`
+ // AreaOfInterest - READ-ONLY; A bounding box for an area of interest inside an image.
+ AreaOfInterest *BoundingRect `json:"areaOfInterest,omitempty"`
+ // RequestID - Id of the REST API request.
+ RequestID *string `json:"requestId,omitempty"`
+ Metadata *ImageMetadata `json:"metadata,omitempty"`
+}
+
+// BoundingRect a bounding box for an area inside an image.
+type BoundingRect struct {
+ // X - X-coordinate of the top left point of the area, in pixels.
+ X *int32 `json:"x,omitempty"`
+ // Y - Y-coordinate of the top left point of the area, in pixels.
+ Y *int32 `json:"y,omitempty"`
+ // W - Width measured from the top-left point of the area, in pixels.
+ W *int32 `json:"w,omitempty"`
+ // H - Height measured from the top-left point of the area, in pixels.
+ H *int32 `json:"h,omitempty"`
+}
+
+// Category an object describing an identified category.
+type Category struct {
+ // Name - Name of the category.
+ Name *string `json:"name,omitempty"`
+ // Score - Scoring of the category.
+ Score *float64 `json:"score,omitempty"`
+ // Detail - Details of the identified category.
+ Detail *CategoryDetail `json:"detail,omitempty"`
+}
+
+// CategoryDetail an object describing additional category details.
+type CategoryDetail struct {
+ // Celebrities - An array of celebrities, if any are identified.
+ Celebrities *[]CelebritiesModel `json:"celebrities,omitempty"`
+ // Landmarks - An array of landmarks, if any are identified.
+ Landmarks *[]LandmarksModel `json:"landmarks,omitempty"`
+}
+
+// CelebritiesModel an object describing possible celebrity identification.
+type CelebritiesModel struct {
+ // Name - Name of the celebrity.
+ Name *string `json:"name,omitempty"`
+ // Confidence - Confidence level for the celebrity recognition as a value ranging from 0 to 1.
+ Confidence *float64 `json:"confidence,omitempty"`
+ // FaceRectangle - Location of the identified face in the image.
+ FaceRectangle *FaceRectangle `json:"faceRectangle,omitempty"`
+}
+
+// CelebrityResults result of domain-specific classifications for the domain of celebrities.
+type CelebrityResults struct {
+ // Celebrities - List of celebrities recognized in the image.
+ Celebrities *[]CelebritiesModel `json:"celebrities,omitempty"`
+ // RequestID - Id of the REST API request.
+ RequestID *string `json:"requestId,omitempty"`
+ Metadata *ImageMetadata `json:"metadata,omitempty"`
+}
+
+// ColorInfo an object providing additional metadata describing color attributes.
+type ColorInfo struct {
+ // DominantColorForeground - Possible dominant foreground color.
+ DominantColorForeground *string `json:"dominantColorForeground,omitempty"`
+ // DominantColorBackground - Possible dominant background color.
+ DominantColorBackground *string `json:"dominantColorBackground,omitempty"`
+ // DominantColors - An array of possible dominant colors.
+ DominantColors *[]string `json:"dominantColors,omitempty"`
+ // AccentColor - Possible accent color.
+ AccentColor *string `json:"accentColor,omitempty"`
+ // IsBWImg - A value indicating if the image is black and white.
+ IsBWImg *bool `json:"isBWImg,omitempty"`
+}
+
+// DetectedBrand a brand detected in an image.
+type DetectedBrand struct {
+ // Name - READ-ONLY; Label for the brand.
+ Name *string `json:"name,omitempty"`
+ // Confidence - READ-ONLY; Confidence score of having observed the brand in the image, as a value ranging from 0 to 1.
+ Confidence *float64 `json:"confidence,omitempty"`
+ // Rectangle - READ-ONLY; Approximate location of the detected brand.
+ Rectangle *BoundingRect `json:"rectangle,omitempty"`
+}
+
+// DetectedObject an object detected in an image.
+type DetectedObject struct {
+ // Rectangle - READ-ONLY; Approximate location of the detected object.
+ Rectangle *BoundingRect `json:"rectangle,omitempty"`
+ // Object - Label for the object.
+ Object *string `json:"object,omitempty"`
+ // Confidence - Confidence score of having observed the object in the image, as a value ranging from 0 to 1.
+ Confidence *float64 `json:"confidence,omitempty"`
+ // Parent - The parent object, from a taxonomy perspective.
+ // The parent object is a more generic form of this object. For example, a 'bulldog' would have a parent of 'dog'.
+ Parent *ObjectHierarchy `json:"parent,omitempty"`
+}
+
+// DetectResult result of a DetectImage call.
+type DetectResult struct {
+ autorest.Response `json:"-"`
+ // Objects - READ-ONLY; An array of detected objects.
+ Objects *[]DetectedObject `json:"objects,omitempty"`
+ // RequestID - Id of the REST API request.
+ RequestID *string `json:"requestId,omitempty"`
+ Metadata *ImageMetadata `json:"metadata,omitempty"`
+}
+
+// DomainModelResults result of image analysis using a specific domain model including additional metadata.
+type DomainModelResults struct {
+ autorest.Response `json:"-"`
+ // Result - Model-specific response.
+ Result interface{} `json:"result,omitempty"`
+ // RequestID - Id of the REST API request.
+ RequestID *string `json:"requestId,omitempty"`
+ Metadata *ImageMetadata `json:"metadata,omitempty"`
+}
+
+// Error details about the API request error.
+type Error struct {
+ // Code - The error code.
+ Code interface{} `json:"code,omitempty"`
+ // Message - A message explaining the error reported by the service.
+ Message *string `json:"message,omitempty"`
+ // RequestID - A unique request identifier.
+ RequestID *string `json:"requestId,omitempty"`
+}
+
+// FaceDescription an object describing a face identified in the image.
+type FaceDescription struct {
+ // Age - Possible age of the face.
+ Age *int32 `json:"age,omitempty"`
+ // Gender - Possible gender of the face. Possible values include: 'Male', 'Female'
+ Gender Gender `json:"gender,omitempty"`
+ // FaceRectangle - Rectangle in the image containing the identified face.
+ FaceRectangle *FaceRectangle `json:"faceRectangle,omitempty"`
+}
+
+// FaceRectangle an object describing face rectangle.
+type FaceRectangle struct {
+ // Left - X-coordinate of the top left point of the face, in pixels.
+ Left *int32 `json:"left,omitempty"`
+ // Top - Y-coordinate of the top left point of the face, in pixels.
+ Top *int32 `json:"top,omitempty"`
+ // Width - Width measured from the top-left point of the face, in pixels.
+ Width *int32 `json:"width,omitempty"`
+ // Height - Height measured from the top-left point of the face, in pixels.
+ Height *int32 `json:"height,omitempty"`
+}
+
+// ImageAnalysis result of AnalyzeImage operation.
+type ImageAnalysis struct {
+ autorest.Response `json:"-"`
+ // Categories - An array indicating identified categories.
+ Categories *[]Category `json:"categories,omitempty"`
+ // Adult - An object describing whether the image contains adult-oriented content and/or is racy.
+ Adult *AdultInfo `json:"adult,omitempty"`
+ // Color - An object providing additional metadata describing color attributes.
+ Color *ColorInfo `json:"color,omitempty"`
+ // ImageType - An object providing possible image types and matching confidence levels.
+ ImageType *ImageType `json:"imageType,omitempty"`
+ // Tags - A list of tags with confidence level.
+ Tags *[]ImageTag `json:"tags,omitempty"`
+ // Description - A collection of content tags, along with a list of captions sorted by confidence level, and image metadata.
+ Description *ImageDescriptionDetails `json:"description,omitempty"`
+ // Faces - An array of possible faces within the image.
+ Faces *[]FaceDescription `json:"faces,omitempty"`
+ // Objects - Array of objects describing what was detected in the image.
+ Objects *[]DetectedObject `json:"objects,omitempty"`
+ // Brands - Array of brands detected in the image.
+ Brands *[]DetectedBrand `json:"brands,omitempty"`
+ // RequestID - Id of the REST API request.
+ RequestID *string `json:"requestId,omitempty"`
+ Metadata *ImageMetadata `json:"metadata,omitempty"`
+}
+
+// ImageCaption an image caption, i.e. a brief description of what the image depicts.
+type ImageCaption struct {
+ // Text - The text of the caption.
+ Text *string `json:"text,omitempty"`
+ // Confidence - The level of confidence the service has in the caption.
+ Confidence *float64 `json:"confidence,omitempty"`
+}
+
+// ImageDescription a collection of content tags, along with a list of captions sorted by confidence level,
+// and image metadata.
+type ImageDescription struct {
+ autorest.Response `json:"-"`
+ // ImageDescriptionDetails - A collection of content tags, along with a list of captions sorted by confidence level, and image metadata.
+ *ImageDescriptionDetails `json:"description,omitempty"`
+ // RequestID - Id of the REST API request.
+ RequestID *string `json:"requestId,omitempty"`
+ Metadata *ImageMetadata `json:"metadata,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ImageDescription.
+func (ID ImageDescription) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ID.ImageDescriptionDetails != nil {
+ objectMap["description"] = ID.ImageDescriptionDetails
+ }
+ if ID.RequestID != nil {
+ objectMap["requestId"] = ID.RequestID
+ }
+ if ID.Metadata != nil {
+ objectMap["metadata"] = ID.Metadata
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ImageDescription struct.
+func (ID *ImageDescription) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "description":
+ if v != nil {
+ var imageDescriptionDetails ImageDescriptionDetails
+ err = json.Unmarshal(*v, &imageDescriptionDetails)
+ if err != nil {
+ return err
+ }
+ ID.ImageDescriptionDetails = &imageDescriptionDetails
+ }
+ case "requestId":
+ if v != nil {
+ var requestID string
+ err = json.Unmarshal(*v, &requestID)
+ if err != nil {
+ return err
+ }
+ ID.RequestID = &requestID
+ }
+ case "metadata":
+ if v != nil {
+ var metadata ImageMetadata
+ err = json.Unmarshal(*v, &metadata)
+ if err != nil {
+ return err
+ }
+ ID.Metadata = &metadata
+ }
+ }
+ }
+
+ return nil
+}
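
A short sketch of how the custom (un)marshalers flatten the embedded ImageDescriptionDetails under the "description" key; the payload below is a hand-written approximation of a service response, not captured output:

    func decodeDescription() error {
        payload := []byte(`{"description":{"tags":["outdoor","house"],"captions":[{"text":"a large house","confidence":0.91}]},"requestId":"0000"}`)
        var d computervision.ImageDescription
        if err := json.Unmarshal(payload, &d); err != nil {
            return err
        }
        // The embedded pointer is promoted, so Captions is reachable directly once it is non-nil.
        if d.ImageDescriptionDetails != nil && d.Captions != nil {
            for _, c := range *d.Captions {
                fmt.Printf("%s (%.2f)\n", *c.Text, *c.Confidence)
            }
        }
        return nil
    }
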
+
+// ImageDescriptionDetails a collection of content tags, along with a list of captions sorted by confidence
+// level, and image metadata.
+type ImageDescriptionDetails struct {
+ // Tags - A collection of image tags.
+ Tags *[]string `json:"tags,omitempty"`
+ // Captions - A list of captions, sorted by confidence level.
+ Captions *[]ImageCaption `json:"captions,omitempty"`
+}
+
+// ImageMetadata image metadata.
+type ImageMetadata struct {
+ // Width - Image width, in pixels.
+ Width *int32 `json:"width,omitempty"`
+ // Height - Image height, in pixels.
+ Height *int32 `json:"height,omitempty"`
+ // Format - Image format.
+ Format *string `json:"format,omitempty"`
+}
+
+// ImageTag an entity observation in the image, along with the confidence score.
+type ImageTag struct {
+ // Name - Name of the entity.
+ Name *string `json:"name,omitempty"`
+ // Confidence - The level of confidence that the entity was observed.
+ Confidence *float64 `json:"confidence,omitempty"`
+ // Hint - Optional hint/details for this tag.
+ Hint *string `json:"hint,omitempty"`
+}
+
+// ImageType an object providing possible image types and matching confidence levels.
+type ImageType struct {
+ // ClipArtType - Confidence level that the image is a clip art.
+ ClipArtType *int32 `json:"clipArtType,omitempty"`
+ // LineDrawingType - Confidence level that the image is a line drawing.
+ LineDrawingType *int32 `json:"lineDrawingType,omitempty"`
+}
+
+// ImageURL ...
+type ImageURL struct {
+ // URL - Publicly reachable URL of an image.
+ URL *string `json:"url,omitempty"`
+}
+
+// LandmarkResults result of domain-specific classifications for the domain of landmarks.
+type LandmarkResults struct {
+ // Landmarks - List of landmarks recognized in the image.
+ Landmarks *[]LandmarksModel `json:"landmarks,omitempty"`
+ // RequestID - Id of the REST API request.
+ RequestID *string `json:"requestId,omitempty"`
+ Metadata *ImageMetadata `json:"metadata,omitempty"`
+}
+
+// LandmarksModel a landmark recognized in the image.
+type LandmarksModel struct {
+ // Name - Name of the landmark.
+ Name *string `json:"name,omitempty"`
+ // Confidence - Confidence level for the landmark recognition as a value ranging from 0 to 1.
+ Confidence *float64 `json:"confidence,omitempty"`
+}
+
+// Line an object representing a recognized text line.
+type Line struct {
+ // BoundingBox - Bounding box of a recognized line.
+ BoundingBox *[]float64 `json:"boundingBox,omitempty"`
+ // Text - The text content of the line.
+ Text *string `json:"text,omitempty"`
+ // Words - List of words in the text line.
+ Words *[]Word `json:"words,omitempty"`
+}
+
+// ListModelsResult result of the List Domain Models operation.
+type ListModelsResult struct {
+ autorest.Response `json:"-"`
+ // ModelsProperty - READ-ONLY; An array of supported models.
+ ModelsProperty *[]ModelDescription `json:"models,omitempty"`
+}
+
+// ModelDescription an object describing supported model by name and categories.
+type ModelDescription struct {
+ // Name - The name of the model.
+ Name *string `json:"name,omitempty"`
+ // Categories - Categories of the model.
+ Categories *[]string `json:"categories,omitempty"`
+}
+
+// ObjectHierarchy an object detected inside an image.
+type ObjectHierarchy struct {
+ // Object - Label for the object.
+ Object *string `json:"object,omitempty"`
+ // Confidence - Confidence score of having observed the object in the image, as a value ranging from 0 to 1.
+ Confidence *float64 `json:"confidence,omitempty"`
+ // Parent - The parent object, from a taxonomy perspective.
+ // The parent object is a more generic form of this object. For example, a 'bulldog' would have a parent of 'dog'.
+ Parent *ObjectHierarchy `json:"parent,omitempty"`
+}
+
+// OcrLine an object describing a single recognized line of text.
+type OcrLine struct {
+ // BoundingBox - Bounding box of a recognized line. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down.
+ BoundingBox *string `json:"boundingBox,omitempty"`
+ // Words - An array of objects, where each object represents a recognized word.
+ Words *[]OcrWord `json:"words,omitempty"`
+}
+
+// OcrRegion a region consisting of multiple lines (e.g. a column of text in a multi-column document).
+type OcrRegion struct {
+ // BoundingBox - Bounding box of a recognized region. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down.
+ BoundingBox *string `json:"boundingBox,omitempty"`
+ // Lines - An array of recognized lines of text.
+ Lines *[]OcrLine `json:"lines,omitempty"`
+}
+
+// OcrResult ...
+type OcrResult struct {
+ autorest.Response `json:"-"`
+ // Language - The BCP-47 language code of the text in the image.
+ Language *string `json:"language,omitempty"`
+ // TextAngle - The angle, in radians, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly.
+ TextAngle *float64 `json:"textAngle,omitempty"`
+ // Orientation - Orientation of the text recognized in the image, if requested. The value (up, down, left, or right) refers to the direction that the top of the recognized text is facing, after the image has been rotated around its center according to the detected text angle (see textAngle property).
+ // If detection of the orientation was not requested, or no text is detected, the value is 'NotDetected'.
+ Orientation *string `json:"orientation,omitempty"`
+ // Regions - An array of objects, where each object represents a region of recognized text.
+ Regions *[]OcrRegion `json:"regions,omitempty"`
+}
+
+// OcrWord information on a recognized word.
+type OcrWord struct {
+ // BoundingBox - Bounding box of a recognized word. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down.
+ BoundingBox *string `json:"boundingBox,omitempty"`
+ // Text - String value of a recognized word.
+ Text *string `json:"text,omitempty"`
+}
+
+// ReadCloser ...
+type ReadCloser struct {
+ autorest.Response `json:"-"`
+ Value *io.ReadCloser `json:"value,omitempty"`
+}
+
+// ReadOperationResult OCR result of the read operation.
+type ReadOperationResult struct {
+ autorest.Response `json:"-"`
+ // Status - Status of the read operation. Possible values include: 'NotStarted', 'Running', 'Failed', 'Succeeded'
+ Status TextOperationStatusCodes `json:"status,omitempty"`
+ // RecognitionResults - An array of text recognition result of the read operation.
+ // RecognitionResults - An array of text recognition results of the read operation.
+}
+
+// TagResult the results of an image tag operation, including any tags and image metadata.
+type TagResult struct {
+ autorest.Response `json:"-"`
+ // Tags - A list of tags with confidence level.
+ Tags *[]ImageTag `json:"tags,omitempty"`
+ // RequestID - Id of the REST API request.
+ RequestID *string `json:"requestId,omitempty"`
+ Metadata *ImageMetadata `json:"metadata,omitempty"`
+}
+
+// TextOperationResult result of a text recognition operation.
+type TextOperationResult struct {
+ autorest.Response `json:"-"`
+ // Status - Status of the text operation. Possible values include: 'NotStarted', 'Running', 'Failed', 'Succeeded'
+ Status TextOperationStatusCodes `json:"status,omitempty"`
+ // RecognitionResult - Text recognition result of the text operation.
+ RecognitionResult *TextRecognitionResult `json:"recognitionResult,omitempty"`
+}
+
+// TextRecognitionResult an object representing a recognized text region
+type TextRecognitionResult struct {
+ // Page - The 1-based page number of the recognition result.
+ Page *int32 `json:"page,omitempty"`
+ // ClockwiseOrientation - The orientation of the image in degrees in the clockwise direction. Range between [0, 360).
+ ClockwiseOrientation *float64 `json:"clockwiseOrientation,omitempty"`
+ // Width - The width of the image in pixels or the PDF in inches.
+ Width *float64 `json:"width,omitempty"`
+ // Height - The height of the image in pixels or the PDF in inches.
+ Height *float64 `json:"height,omitempty"`
+ // Unit - The unit used in the Width, Height and BoundingBox. For images, the unit is 'pixel'. For PDF, the unit is 'inch'. Possible values include: 'Pixel', 'Inch'
+ Unit TextRecognitionResultDimensionUnit `json:"unit,omitempty"`
+ // Lines - A list of recognized text lines.
+ Lines *[]Line `json:"lines,omitempty"`
+}
+
+// Word an object representing a recognized word.
+type Word struct {
+ // BoundingBox - Bounding box of a recognized word.
+ BoundingBox *[]float64 `json:"boundingBox,omitempty"`
+ // Text - The text content of the word.
+ Text *string `json:"text,omitempty"`
+ // Confidence - Qualitative confidence measure. Possible values include: 'High', 'Low'
+ Confidence TextRecognitionResultConfidenceClass `json:"confidence,omitempty"`
+}
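
A sketch of the asynchronous read flow these models back: start BatchReadFile, take the operation ID from the Operation-Location header (assumed here to be the final path segment), then poll GetReadOperationResult until the status settles; the helper name, poll interval, and retry bound are arbitrary:

    func readImageText(ctx context.Context, client computervision.BaseClient, imageURL string) (*computervision.ReadOperationResult, error) {
        img := computervision.ImageURL{URL: &imageURL}
        resp, err := client.BatchReadFile(ctx, img)
        if err != nil {
            return nil, err
        }
        opLocation := resp.Header.Get("Operation-Location")
        opID := opLocation[strings.LastIndex(opLocation, "/")+1:]

        for i := 0; i < 60; i++ { // bounded, fixed-interval polling
            res, err := client.GetReadOperationResult(ctx, opID)
            if err != nil {
                return nil, err
            }
            switch res.Status {
            case computervision.Succeeded:
                return &res, nil
            case computervision.Failed:
                return nil, errors.New("read operation failed")
            }
            time.Sleep(time.Second)
        }
        return nil, errors.New("timed out waiting for the read operation")
    }
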
diff --git a/services/cognitiveservices/v2.1/computervision/version.go b/services/cognitiveservices/v2.1/computervision/version.go
new file mode 100644
index 000000000000..11f8318a0de1
--- /dev/null
+++ b/services/cognitiveservices/v2.1/computervision/version.go
@@ -0,0 +1,30 @@
+package computervision
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " computervision/2.1"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
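
A trivial fragment exercising the two helpers; the exact strings depend on version.Number at build time:

    fmt.Println(computervision.UserAgent()) // e.g. "Azure-SDK-For-Go/v33.0.0 computervision/2.1"
    fmt.Println(computervision.Version())   // the shared SDK version number
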
diff --git a/services/cognitiveservices/v4.0/qnamaker/models.go b/services/cognitiveservices/v4.0/qnamaker/models.go
index bf4f36a94223..a5e23760bc24 100644
--- a/services/cognitiveservices/v4.0/qnamaker/models.go
+++ b/services/cognitiveservices/v4.0/qnamaker/models.go
@@ -78,21 +78,6 @@ func PossibleErrorCodeTypeValues() []ErrorCodeType {
return []ErrorCodeType{BadArgument, EndpointKeysError, ExtractionFailure, Forbidden, KbNotFound, NotFound, OperationNotFound, QnaRuntimeError, QuotaExceeded, ServiceError, SKULimitExceeded, Unauthorized, Unspecified, ValidationFailure}
}
-// KnowledgebaseEnvironmentType enumerates the values for knowledgebase environment type.
-type KnowledgebaseEnvironmentType string
-
-const (
- // KnowledgebaseEnvironmentTypeProd ...
- KnowledgebaseEnvironmentTypeProd KnowledgebaseEnvironmentType = "Prod"
- // KnowledgebaseEnvironmentTypeTest ...
- KnowledgebaseEnvironmentTypeTest KnowledgebaseEnvironmentType = "Test"
-)
-
-// PossibleKnowledgebaseEnvironmentTypeValues returns an array of possible values for the KnowledgebaseEnvironmentType const type.
-func PossibleKnowledgebaseEnvironmentTypeValues() []KnowledgebaseEnvironmentType {
- return []KnowledgebaseEnvironmentType{KnowledgebaseEnvironmentTypeProd, KnowledgebaseEnvironmentTypeTest}
-}
-
// OperationStateType enumerates the values for operation state type.
type OperationStateType string
diff --git a/services/compute/mgmt/2019-03-01/compute/computeapi/interfaces.go b/services/compute/mgmt/2019-03-01/compute/computeapi/interfaces.go
index 25b1f3f17e37..9dd226f0493a 100644
--- a/services/compute/mgmt/2019-03-01/compute/computeapi/interfaces.go
+++ b/services/compute/mgmt/2019-03-01/compute/computeapi/interfaces.go
@@ -212,7 +212,7 @@ var _ VirtualMachineScaleSetRollingUpgradesClientAPI = (*compute.VirtualMachineS
type VirtualMachineScaleSetVMsClientAPI interface {
Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsDeallocateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsDeleteFuture, err error)
- Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error)
+ Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand compute.InstanceViewTypes) (result compute.VirtualMachineScaleSetVM, err error)
GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error)
List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResultPage, err error)
PerformMaintenance(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture, err error)
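
Callers of the 2019-03-01 compute package must pass the new `expand` argument after this breaking change. A hedged sketch with placeholder resource names; passing the zero value simply omits `$expand`, matching the preparer guard added later in this diff.

```go
package snippets

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
)

// getScaleSetVM shows the extra expand parameter on the updated Get signature.
// "" is the zero InstanceViewTypes value; the preparer only adds $expand when non-empty.
func getScaleSetVM(ctx context.Context, client compute.VirtualMachineScaleSetVMsClient) (compute.VirtualMachineScaleSetVM, error) {
	return client.Get(ctx, "myResourceGroup", "myScaleSet", "0", "")
}
```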
diff --git a/services/compute/mgmt/2019-03-01/compute/disks.go b/services/compute/mgmt/2019-03-01/compute/disks.go
index 2dee9d74dad5..31ee32f12f42 100644
--- a/services/compute/mgmt/2019-03-01/compute/disks.go
+++ b/services/compute/mgmt/2019-03-01/compute/disks.go
@@ -95,7 +95,7 @@ func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -177,7 +177,7 @@ func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -261,7 +261,7 @@ func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName str
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -341,7 +341,7 @@ func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroup
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -422,7 +422,7 @@ func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, erro
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -535,7 +535,7 @@ func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resou
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -645,7 +645,7 @@ func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGrou
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -724,7 +724,7 @@ func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/services/compute/mgmt/2019-03-01/compute/models.go b/services/compute/mgmt/2019-03-01/compute/models.go
index f231357f948e..e5166f2e83e8 100644
--- a/services/compute/mgmt/2019-03-01/compute/models.go
+++ b/services/compute/mgmt/2019-03-01/compute/models.go
@@ -270,19 +270,22 @@ func PossibleDiffDiskOptionsValues() []DiffDiskOptions {
type DiskCreateOption string
const (
- // Attach ...
+ // Attach Disk will be attached to a VM.
Attach DiskCreateOption = "Attach"
- // Copy ...
+ // Copy Create a new disk or snapshot by copying from a disk or snapshot specified by the given
+ // sourceResourceId.
Copy DiskCreateOption = "Copy"
- // Empty ...
+ // Empty Create an empty data disk of a size given by diskSizeGB.
Empty DiskCreateOption = "Empty"
- // FromImage ...
+ // FromImage Create a new disk from a platform image specified by the given imageReference.
FromImage DiskCreateOption = "FromImage"
- // Import ...
+ // Import Create a disk by importing from a blob specified by a sourceUri in a storage account specified by
+ // storageAccountId.
Import DiskCreateOption = "Import"
- // Restore ...
+ // Restore Create a new disk by copying from a backup recovery point.
Restore DiskCreateOption = "Restore"
- // Upload ...
+ // Upload Create a new disk by obtaining a write token and using it to directly upload the contents of the
+ // disk.
Upload DiskCreateOption = "Upload"
)
@@ -312,17 +315,17 @@ func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes {
type DiskState string
const (
- // ActiveSAS ...
+ // ActiveSAS The disk currently has an Active SAS Uri associated with it.
ActiveSAS DiskState = "ActiveSAS"
- // ActiveUpload ...
+ // ActiveUpload A disk is created for upload and a write token has been issued for uploading to it.
ActiveUpload DiskState = "ActiveUpload"
- // Attached ...
+ // Attached The disk is currently mounted to a running VM.
Attached DiskState = "Attached"
- // ReadyToUpload ...
+ // ReadyToUpload A disk is ready to be created by upload by requesting a write token.
ReadyToUpload DiskState = "ReadyToUpload"
- // Reserved ...
+ // Reserved The disk is mounted to a stopped-deallocated VM
Reserved DiskState = "Reserved"
- // Unattached ...
+ // Unattached The disk is not being used and can be attached to a VM.
Unattached DiskState = "Unattached"
)
@@ -335,13 +338,17 @@ func PossibleDiskStateValues() []DiskState {
type DiskStorageAccountTypes string
const (
- // PremiumLRS ...
+ // PremiumLRS Premium SSD locally redundant storage. Best for production and performance sensitive
+ // workloads.
PremiumLRS DiskStorageAccountTypes = "Premium_LRS"
- // StandardLRS ...
+ // StandardLRS Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent
+ // access.
StandardLRS DiskStorageAccountTypes = "Standard_LRS"
- // StandardSSDLRS ...
+ // StandardSSDLRS Standard SSD locally redundant storage. Best for web servers, lightly used enterprise
+ // applications and dev/test.
StandardSSDLRS DiskStorageAccountTypes = "StandardSSD_LRS"
- // UltraSSDLRS ...
+ // UltraSSDLRS Ultra SSD locally redundant storage. Best for IO-intensive workloads such as SAP HANA, top
+ // tier databases (for example, SQL, Oracle), and other transaction-heavy workloads.
UltraSSDLRS DiskStorageAccountTypes = "UltraSSD_LRS"
)
@@ -794,11 +801,11 @@ func PossibleSettingNamesValues() []SettingNames {
type SnapshotStorageAccountTypes string
const (
- // SnapshotStorageAccountTypesPremiumLRS ...
+ // SnapshotStorageAccountTypesPremiumLRS Premium SSD locally redundant storage
SnapshotStorageAccountTypesPremiumLRS SnapshotStorageAccountTypes = "Premium_LRS"
- // SnapshotStorageAccountTypesStandardLRS ...
+ // SnapshotStorageAccountTypesStandardLRS Standard HDD locally redundant storage
SnapshotStorageAccountTypesStandardLRS SnapshotStorageAccountTypes = "Standard_LRS"
- // SnapshotStorageAccountTypesStandardZRS ...
+ // SnapshotStorageAccountTypesStandardZRS Standard zone redundant storage
SnapshotStorageAccountTypesStandardZRS SnapshotStorageAccountTypes = "Standard_ZRS"
)
@@ -1721,6 +1728,13 @@ func (asu *AvailabilitySetUpdate) UnmarshalJSON(body []byte) error {
return nil
}
+// BillingProfile specifies the billing related details of a low priority VM or VMSS. Minimum
+// api-version: 2019-03-01.
+type BillingProfile struct {
+ // MaxPrice - Specifies the maximum price you are willing to pay for a low priority VM/VMSS. This price is in US Dollars.
+ // This price will be compared with the current low priority price for the VM size. Also, the prices are compared at the time of create/update of low priority VM/VMSS and the operation will only succeed if the maxPrice is greater than the current low priority price.
+ // The maxPrice will also be used for evicting a low priority VM/VMSS if the current low priority price goes beyond the maxPrice after creation of VM/VMSS.
+ // Possible values are:
+ // - Any decimal value greater than zero. Example: $0.01538
+ // -1 – indicates default price to be up-to on-demand.
+ // You can set the maxPrice to -1 to indicate that the low priority VM/VMSS should not be evicted for price reasons. Also, the default max price is -1 if it is not provided by you.
+ // Minimum api-version: 2019-03-01.
+ MaxPrice *float64 `json:"maxPrice,omitempty"`
+}
+
// BootDiagnostics boot Diagnostics is a debugging feature which allows you to view Console Output and
// Screenshot to diagnose VM status.
// You can easily view the output of your console log.
// Azure also enables you to see a screenshot of the VM from the hypervisor.
@@ -2165,6 +2179,10 @@ type CreationData struct {
SourceURI *string `json:"sourceUri,omitempty"`
// SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk.
SourceResourceID *string `json:"sourceResourceId,omitempty"`
+ // SourceUniqueID - READ-ONLY; If this field is set, this is the unique id identifying the source of this resource.
+ SourceUniqueID *string `json:"sourceUniqueId,omitempty"`
+ // UploadSizeBytes - If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
+ UploadSizeBytes *int64 `json:"uploadSizeBytes,omitempty"`
}
// DataDisk describes a data disk.
@@ -3307,8 +3325,12 @@ type DiskProperties struct {
HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
// CreationData - Disk source information. CreationData information cannot be changed after the disk has been created.
CreationData *CreationData `json:"creationData,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
+ // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+ // DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only.
+ DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"`
+ // UniqueID - READ-ONLY; Unique Guid identifying the resource.
+ UniqueID *string `json:"uniqueId,omitempty"`
// EncryptionSettingsCollection - Encryption settings collection used for Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
// ProvisioningState - READ-ONLY; The disk provisioning state.
@@ -3529,7 +3551,7 @@ func (du *DiskUpdate) UnmarshalJSON(body []byte) error {
type DiskUpdateProperties struct {
// OsType - the Operating System type. Possible values include: 'Windows', 'Linux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
+ // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
// EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
@@ -3545,6 +3567,8 @@ type EncryptionSettingsCollection struct {
Enabled *bool `json:"enabled,omitempty"`
// EncryptionSettings - A collection of encryption settings, one for each disk volume.
EncryptionSettings *[]EncryptionSettingsElement `json:"encryptionSettings,omitempty"`
+ // EncryptionSettingsVersion - Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds to Azure Disk Encryption.
+ EncryptionSettingsVersion *string `json:"encryptionSettingsVersion,omitempty"`
}
// EncryptionSettingsElement encryption settings for one disk volume.
@@ -4665,7 +4689,7 @@ type GalleryImageProperties struct {
ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"`
// OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.
// Possible values are:
// **Windows**
// **Linux**. Possible values include: 'Windows', 'Linux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // OsState - The allowed values for OS State are 'Generalized'. Possible values include: 'Generalized', 'Specialized'
+ // OsState - This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'. Possible values include: 'Generalized', 'Specialized'
OsState OperatingSystemStateTypes `json:"osState,omitempty"`
// EndOfLifeDate - The end of life date of the gallery Image Definition. This property can be used for decommissioning purposes. This property is updatable.
EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
@@ -5056,7 +5080,7 @@ func (future *GalleryImageVersionsDeleteFuture) Result(client GalleryImageVersio
return
}
-// GalleryImageVersionStorageProfile this is the storage profile of a gallery Image Version.
+// GalleryImageVersionStorageProfile this is the storage profile of a Gallery Image Version.
type GalleryImageVersionStorageProfile struct {
// OsDiskImage - READ-ONLY
OsDiskImage *GalleryOSDiskImage `json:"osDiskImage,omitempty"`
@@ -7267,6 +7291,12 @@ type RunCommandResult struct {
Value *[]InstanceViewStatus `json:"value,omitempty"`
}
+// ScheduledEventsProfile ...
+type ScheduledEventsProfile struct {
+ // TerminateNotificationProfile - Specifies Terminate Scheduled Event related configurations.
+ TerminateNotificationProfile *TerminateNotificationProfile `json:"terminateNotificationProfile,omitempty"`
+}
+
// Sku describes a virtual machine scale set sku.
type Sku struct {
// Name - The sku name.
@@ -7557,12 +7587,18 @@ type SnapshotProperties struct {
HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
// CreationData - Disk source information. CreationData information cannot be changed after the disk has been created.
CreationData *CreationData `json:"creationData,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
+ // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+ // DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only.
+ DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"`
+ // UniqueID - READ-ONLY; Unique Guid identifying the resource.
+ UniqueID *string `json:"uniqueId,omitempty"`
// EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
// ProvisioningState - READ-ONLY; The disk provisioning state.
ProvisioningState *string `json:"provisioningState,omitempty"`
+ // Incremental - Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed.
+ Incremental *bool `json:"incremental,omitempty"`
}
// SnapshotsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
@@ -7775,7 +7811,7 @@ func (su *SnapshotUpdate) UnmarshalJSON(body []byte) error {
type SnapshotUpdateProperties struct {
// OsType - the Operating System type. Possible values include: 'Windows', 'Linux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
+ // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
// EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
@@ -7835,6 +7871,14 @@ type TargetRegion struct {
StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
}
+// TerminateNotificationProfile ...
+type TerminateNotificationProfile struct {
+ // NotBeforeTimeout - Configurable length of time a Virtual Machine being deleted will have to potentially approve the Terminate Scheduled Event before the event is auto approved (timed out). The configuration must be specified in ISO 8601 format, the default value is 5 minutes (PT5M)
+ NotBeforeTimeout *string `json:"notBeforeTimeout,omitempty"`
+ // Enable - Specifies whether the Terminate Scheduled event is enabled or disabled.
+ Enable *bool `json:"enable,omitempty"`
+}
+
// ThrottledRequestsInput api request input for LogAnalytics getThrottledRequests Api.
type ThrottledRequestsInput struct {
// BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to.
@@ -8709,6 +8753,8 @@ type VirtualMachineImageProperties struct {
OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"`
DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"`
AutomaticOSUpgradeProperties *AutomaticOSUpgradeProperties `json:"automaticOSUpgradeProperties,omitempty"`
+ // HyperVGeneration - Possible values include: 'HyperVGenerationTypesV1', 'HyperVGenerationTypesV2'
+ HyperVGeneration HyperVGenerationTypes `json:"hyperVGeneration,omitempty"`
}
// VirtualMachineImageResource virtual machine image resource information.
@@ -8932,10 +8978,18 @@ type VirtualMachineProperties struct {
NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"`
// DiagnosticsProfile - Specifies the boot diagnostic settings state.
// Minimum api-version: 2015-06-15.
DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
- // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
- // For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
- // Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.
+ // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
+ // For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
+ // Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.
+ // This property cannot exist along with a non-null properties.virtualMachineScaleSet reference.
AvailabilitySet *SubResource `json:"availabilitySet,omitempty"`
+ // VirtualMachineScaleSet - Specifies information about the virtual machine scale set that the virtual machine should be assigned to. Virtual machines specified in the same virtual machine scale set are allocated to different nodes to maximize availability. Currently, a VM can only be added to virtual machine scale set at creation time. An existing VM cannot be added to a virtual machine scale set.
+ // This property cannot exist along with a non-null properties.availabilitySet reference.
+ // Minimum api-version: 2019-03-01
+ VirtualMachineScaleSet *SubResource `json:"virtualMachineScaleSet,omitempty"`
// ProximityPlacementGroup - Specifies information about the proximity placement group that the virtual machine should be assigned to.
// Minimum api-version: 2018-04-01.
ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"`
+ // Priority - Specifies the priority for the virtual machine.
+ // Minimum api-version: 2019-03-01. Possible values include: 'Regular', 'Low'
+ Priority VirtualMachinePriorityTypes `json:"priority,omitempty"`
+ // EvictionPolicy - Specifies the eviction policy for the low priority virtual machine. Only supported value is 'Deallocate'.
+ // Minimum api-version: 2019-03-01. Possible values include: 'Deallocate', 'Delete'
+ EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"`
+ // BillingProfile - Specifies the billing related details of a low priority virtual machine.
+ // Minimum api-version: 2019-03-01.
+ BillingProfile *BillingProfile `json:"billingProfile,omitempty"`
// Host - Specifies information about the dedicated host that the virtual machine resides in.
// Minimum api-version: 2018-10-01.
Host *SubResource `json:"host,omitempty"`
// ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
@@ -11225,6 +11279,10 @@ type VirtualMachineScaleSetUpdateVMProfile struct {
ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"`
// LicenseType - The license type, which is for bring your own license scenario.
LicenseType *string `json:"licenseType,omitempty"`
+ // BillingProfile - Specifies the billing related details of a low priority VMSS.
+ // Minimum api-version: 2019-03-01.
+ BillingProfile *BillingProfile `json:"billingProfile,omitempty"`
+ // ScheduledEventsProfile - Specifies Scheduled Event related configurations.
+ ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"`
}
// VirtualMachineScaleSetVM describes a virtual machine scale set virtual machine.
@@ -11607,6 +11665,10 @@ type VirtualMachineScaleSetVMProfile struct {
Priority VirtualMachinePriorityTypes `json:"priority,omitempty"`
// EvictionPolicy - Specifies the eviction policy for virtual machines in a low priority scale set.
// Minimum api-version: 2017-10-30-preview. Possible values include: 'Deallocate', 'Delete'
EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"`
+ // BillingProfile - Specifies the billing related details of a low priority VMSS.
+ // Minimum api-version: 2019-03-01.
+ BillingProfile *BillingProfile `json:"billingProfile,omitempty"`
+ // ScheduledEventsProfile - Specifies Scheduled Event related configurations.
+ ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"`
}
// VirtualMachineScaleSetVMProperties describes the properties of a virtual machine scale set virtual
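
A hedged sketch of how the new low-priority fields added above might be wired together when building a scale set VM profile. The enum string literals and field names come from the model comments in this diff; the price and timeout values are purely illustrative, and the `to` pointer helpers come from go-autorest.

```go
package snippets

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// lowPriorityProfile populates the new billing and scheduled-events fields.
func lowPriorityProfile() compute.VirtualMachineScaleSetVMProfile {
	return compute.VirtualMachineScaleSetVMProfile{
		Priority:       compute.VirtualMachinePriorityTypes("Low"),
		EvictionPolicy: compute.VirtualMachineEvictionPolicyTypes("Deallocate"),
		// Any positive decimal caps the hourly price; -1 means "do not evict for price".
		BillingProfile: &compute.BillingProfile{MaxPrice: to.Float64Ptr(0.05)},
		ScheduledEventsProfile: &compute.ScheduledEventsProfile{
			TerminateNotificationProfile: &compute.TerminateNotificationProfile{
				Enable:           to.BoolPtr(true),
				NotBeforeTimeout: to.StringPtr("PT5M"), // ISO 8601; PT5M is the documented default
			},
		},
	}
}
```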
diff --git a/services/compute/mgmt/2019-03-01/compute/snapshots.go b/services/compute/mgmt/2019-03-01/compute/snapshots.go
index dc153b65164e..5fca4e7acd7f 100644
--- a/services/compute/mgmt/2019-03-01/compute/snapshots.go
+++ b/services/compute/mgmt/2019-03-01/compute/snapshots.go
@@ -94,7 +94,7 @@ func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resour
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -175,7 +175,7 @@ func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupN
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -258,7 +258,7 @@ func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -337,7 +337,7 @@ func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceG
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -418,7 +418,7 @@ func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -531,7 +531,7 @@ func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, r
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -640,7 +640,7 @@ func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resource
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -718,7 +718,7 @@ func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupN
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetvms.go b/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetvms.go
index bf227197eb48..011068c7ad89 100644
--- a/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetvms.go
+++ b/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetvms.go
@@ -204,7 +204,8 @@ func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Respons
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
-func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, err error) {
+// expand - the expand expression to apply on the operation.
+func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand InstanceViewTypes) (result VirtualMachineScaleSetVM, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Get")
defer func() {
@@ -215,7 +216,7 @@ func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceG
tracing.EndSpan(ctx, sc, err)
}()
}
- req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
+ req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", nil, "Failure preparing request")
return
@@ -237,7 +238,7 @@ func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceG
}
// GetPreparer prepares the Get request.
-func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
+func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand InstanceViewTypes) (*http.Request, error) {
pathParameters := map[string]interface{}{
"instanceId": autorest.Encode("path", instanceID),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
@@ -249,6 +250,9 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, r
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
+ if len(string(expand)) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
preparer := autorest.CreatePreparer(
autorest.AsGet(),
diff --git a/services/compute/mgmt/2019-07-01/compute/computeapi/interfaces.go b/services/compute/mgmt/2019-07-01/compute/computeapi/interfaces.go
index 64b28db0171d..f4658b1dfc43 100644
--- a/services/compute/mgmt/2019-07-01/compute/computeapi/interfaces.go
+++ b/services/compute/mgmt/2019-07-01/compute/computeapi/interfaces.go
@@ -212,7 +212,7 @@ var _ VirtualMachineScaleSetRollingUpgradesClientAPI = (*compute.VirtualMachineS
type VirtualMachineScaleSetVMsClientAPI interface {
Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsDeallocateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsDeleteFuture, err error)
- Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error)
+ Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand compute.InstanceViewTypes) (result compute.VirtualMachineScaleSetVM, err error)
GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error)
List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result compute.VirtualMachineScaleSetVMListResultPage, err error)
PerformMaintenance(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture, err error)
@@ -310,6 +310,26 @@ type GalleryImageVersionsClientAPI interface {
var _ GalleryImageVersionsClientAPI = (*compute.GalleryImageVersionsClient)(nil)
+// GalleryApplicationsClientAPI contains the set of methods on the GalleryApplicationsClient type.
+type GalleryApplicationsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication compute.GalleryApplication) (result compute.GalleryApplicationsCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result compute.GalleryApplicationsDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result compute.GalleryApplication, err error)
+ ListByGallery(ctx context.Context, resourceGroupName string, galleryName string) (result compute.GalleryApplicationListPage, err error)
+}
+
+var _ GalleryApplicationsClientAPI = (*compute.GalleryApplicationsClient)(nil)
+
+// GalleryApplicationVersionsClientAPI contains the set of methods on the GalleryApplicationVersionsClient type.
+type GalleryApplicationVersionsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion compute.GalleryApplicationVersion) (result compute.GalleryApplicationVersionsCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string) (result compute.GalleryApplicationVersionsDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, expand compute.ReplicationStatusTypes) (result compute.GalleryApplicationVersion, err error)
+ ListByGalleryApplication(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result compute.GalleryApplicationVersionListPage, err error)
+}
+
+var _ GalleryApplicationVersionsClientAPI = (*compute.GalleryApplicationVersionsClient)(nil)
+
// ContainerServicesClientAPI contains the set of methods on the ContainerServicesClient type.
type ContainerServicesClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, containerServiceName string, parameters compute.ContainerService) (result compute.ContainerServicesCreateOrUpdateFuture, err error)
diff --git a/services/compute/mgmt/2019-07-01/compute/disks.go b/services/compute/mgmt/2019-07-01/compute/disks.go
index 2dee9d74dad5..31ee32f12f42 100644
--- a/services/compute/mgmt/2019-07-01/compute/disks.go
+++ b/services/compute/mgmt/2019-07-01/compute/disks.go
@@ -95,7 +95,7 @@ func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -177,7 +177,7 @@ func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -261,7 +261,7 @@ func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName str
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -341,7 +341,7 @@ func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroup
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -422,7 +422,7 @@ func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, erro
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -535,7 +535,7 @@ func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resou
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -645,7 +645,7 @@ func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGrou
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -724,7 +724,7 @@ func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/services/compute/mgmt/2019-07-01/compute/galleryapplications.go b/services/compute/mgmt/2019-07-01/compute/galleryapplications.go
new file mode 100644
index 000000000000..c2c651209dee
--- /dev/null
+++ b/services/compute/mgmt/2019-07-01/compute/galleryapplications.go
@@ -0,0 +1,401 @@
+package compute
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// GalleryApplicationsClient is the compute Client
+type GalleryApplicationsClient struct {
+ BaseClient
+}
+
+// NewGalleryApplicationsClient creates an instance of the GalleryApplicationsClient client.
+func NewGalleryApplicationsClient(subscriptionID string) GalleryApplicationsClient {
+ return NewGalleryApplicationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewGalleryApplicationsClientWithBaseURI creates an instance of the GalleryApplicationsClient client.
+func NewGalleryApplicationsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationsClient {
+ return GalleryApplicationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate create or update a gallery Application Definition.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// galleryName - the name of the Shared Application Gallery in which the Application Definition is to be
+// created.
+// galleryApplicationName - the name of the gallery Application Definition to be created or updated. The
+// allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the middle. The
+// maximum length is 80 characters.
+// galleryApplication - parameters supplied to the create or update gallery Application operation.
+func (client GalleryApplicationsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication) (result GalleryApplicationsCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplication)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client GalleryApplicationsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
+ "galleryName": autorest.Encode("path", galleryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}", pathParameters),
+ autorest.WithJSON(galleryApplication),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client GalleryApplicationsClient) CreateOrUpdateSender(req *http.Request) (future GalleryApplicationsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client GalleryApplicationsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryApplication, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete delete a gallery Application.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// galleryName - the name of the Shared Application Gallery in which the Application Definition is to be
+// deleted.
+// galleryApplicationName - the name of the gallery Application Definition to be deleted.
+func (client GalleryApplicationsClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplicationsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client GalleryApplicationsClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
+ "galleryName": autorest.Encode("path", galleryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client GalleryApplicationsClient) DeleteSender(req *http.Request) (future GalleryApplicationsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client GalleryApplicationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get retrieves information about a gallery Application Definition.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// galleryName - the name of the Shared Application Gallery from which the Application Definitions are to be
+// retrieved.
+// galleryApplicationName - the name of the gallery Application Definition to be retrieved.
+func (client GalleryApplicationsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplication, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryApplicationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client GalleryApplicationsClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
+ "galleryName": autorest.Encode("path", galleryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client GalleryApplicationsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client GalleryApplicationsClient) GetResponder(resp *http.Response) (result GalleryApplication, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByGallery list gallery Application Definitions in a gallery.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// galleryName - the name of the Shared Application Gallery from which Application Definitions are to be
+// listed.
+func (client GalleryApplicationsClient) ListByGallery(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryApplicationListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.ListByGallery")
+ defer func() {
+ sc := -1
+ if result.gal.Response.Response != nil {
+ sc = result.gal.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByGalleryNextResults
+ req, err := client.ListByGalleryPreparer(ctx, resourceGroupName, galleryName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "ListByGallery", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByGallerySender(req)
+ if err != nil {
+ result.gal.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "ListByGallery", resp, "Failure sending request")
+ return
+ }
+
+ result.gal, err = client.ListByGalleryResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "ListByGallery", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByGalleryPreparer prepares the ListByGallery request.
+func (client GalleryApplicationsClient) ListByGalleryPreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "galleryName": autorest.Encode("path", galleryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByGallerySender sends the ListByGallery request. The method will close the
+// http.Response Body if it receives an error.
+func (client GalleryApplicationsClient) ListByGallerySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByGalleryResponder handles the response to the ListByGallery request. The method always
+// closes the http.Response Body.
+func (client GalleryApplicationsClient) ListByGalleryResponder(resp *http.Response) (result GalleryApplicationList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByGalleryNextResults retrieves the next set of results, if any.
+func (client GalleryApplicationsClient) listByGalleryNextResults(ctx context.Context, lastResults GalleryApplicationList) (result GalleryApplicationList, err error) {
+ req, err := lastResults.galleryApplicationListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "listByGalleryNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByGallerySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "listByGalleryNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByGalleryResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "listByGalleryNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByGalleryComplete enumerates all values, automatically crossing page boundaries as required.
+func (client GalleryApplicationsClient) ListByGalleryComplete(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryApplicationListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.ListByGallery")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByGallery(ctx, resourceGroupName, galleryName)
+ return
+}
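
For reference, here is a minimal consumption sketch of the pager added above. It is not part of the generated diff: the subscription, resource group, and gallery names are placeholders, and it assumes the package's standard generated constructor plus an environment-based authorizer from go-autorest.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholder identifiers; substitute real values.
	client := compute.NewGalleryApplicationsClient("<subscription-id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// ListByGalleryComplete wraps ListByGallery and follows nextLink pages automatically.
	iter, err := client.ListByGalleryComplete(context.Background(), "myResourceGroup", "myGallery")
	if err != nil {
		log.Fatal(err)
	}
	for iter.NotDone() {
		if name := iter.Value().Name; name != nil {
			fmt.Println(*name)
		}
		if err := iter.NextWithContext(context.Background()); err != nil {
			log.Fatal(err)
		}
	}
}
```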
diff --git a/services/compute/mgmt/2019-07-01/compute/galleryapplicationversions.go b/services/compute/mgmt/2019-07-01/compute/galleryapplicationversions.go
new file mode 100644
index 000000000000..40b2dc830e93
--- /dev/null
+++ b/services/compute/mgmt/2019-07-01/compute/galleryapplicationversions.go
@@ -0,0 +1,428 @@
+package compute
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// GalleryApplicationVersionsClient is the compute Client
+type GalleryApplicationVersionsClient struct {
+ BaseClient
+}
+
+// NewGalleryApplicationVersionsClient creates an instance of the GalleryApplicationVersionsClient client.
+func NewGalleryApplicationVersionsClient(subscriptionID string) GalleryApplicationVersionsClient {
+ return NewGalleryApplicationVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewGalleryApplicationVersionsClientWithBaseURI creates an instance of the GalleryApplicationVersionsClient client.
+func NewGalleryApplicationVersionsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationVersionsClient {
+ return GalleryApplicationVersionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate create or update a gallery Application Version.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// galleryName - the name of the Shared Application Gallery in which the Application Definition resides.
+// galleryApplicationName - the name of the gallery Application Definition in which the Application Version is
+// to be created.
+// galleryApplicationVersionName - the name of the gallery Application Version to be created. Needs to follow
+// semantic version name pattern: The allowed characters are digit and period. Digits must be within the range
+// of a 32-bit integer. Format: ..
+// galleryApplicationVersion - parameters supplied to the create or update gallery Application Version
+// operation.
+func (client GalleryApplicationVersionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion) (result GalleryApplicationVersionsCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: galleryApplicationVersion,
+ Constraints: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile.Source", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile.Source.FileName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "galleryApplicationVersion.GalleryApplicationVersionProperties.PublishingProfile.Source.MediaLink", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("compute.GalleryApplicationVersionsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, galleryApplicationVersion)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client GalleryApplicationVersionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
+ "galleryApplicationVersionName": autorest.Encode("path", galleryApplicationVersionName),
+ "galleryName": autorest.Encode("path", galleryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}", pathParameters),
+ autorest.WithJSON(galleryApplicationVersion),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client GalleryApplicationVersionsClient) CreateOrUpdateSender(req *http.Request) (future GalleryApplicationVersionsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client GalleryApplicationVersionsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryApplicationVersion, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete delete a gallery Application Version.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// galleryName - the name of the Shared Application Gallery in which the Application Definition resides.
+// galleryApplicationName - the name of the gallery Application Definition in which the Application Version
+// resides.
+// galleryApplicationVersionName - the name of the gallery Application Version to be deleted.
+func (client GalleryApplicationVersionsClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string) (result GalleryApplicationVersionsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client GalleryApplicationVersionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
+ "galleryApplicationVersionName": autorest.Encode("path", galleryApplicationVersionName),
+ "galleryName": autorest.Encode("path", galleryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client GalleryApplicationVersionsClient) DeleteSender(req *http.Request) (future GalleryApplicationVersionsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client GalleryApplicationVersionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get retrieves information about a gallery Application Version.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// galleryName - the name of the Shared Application Gallery in which the Application Definition resides.
+// galleryApplicationName - the name of the gallery Application Definition in which the Application Version
+// resides.
+// galleryApplicationVersionName - the name of the gallery Application Version to be retrieved.
+// expand - the expand expression to apply on the operation.
+func (client GalleryApplicationVersionsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, expand ReplicationStatusTypes) (result GalleryApplicationVersion, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, expand)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client GalleryApplicationVersionsClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, expand ReplicationStatusTypes) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
+ "galleryApplicationVersionName": autorest.Encode("path", galleryApplicationVersionName),
+ "galleryName": autorest.Encode("path", galleryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(string(expand)) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client GalleryApplicationVersionsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client GalleryApplicationVersionsClient) GetResponder(resp *http.Response) (result GalleryApplicationVersion, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByGalleryApplication list gallery Application Versions in a gallery Application Definition.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// galleryName - the name of the Shared Application Gallery in which the Application Definition resides.
+// galleryApplicationName - the name of the Shared Application Gallery Application Definition from which the
+// Application Versions are to be listed.
+func (client GalleryApplicationVersionsClient) ListByGalleryApplication(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplicationVersionListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.ListByGalleryApplication")
+ defer func() {
+ sc := -1
+ if result.gavl.Response.Response != nil {
+ sc = result.gavl.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByGalleryApplicationNextResults
+ req, err := client.ListByGalleryApplicationPreparer(ctx, resourceGroupName, galleryName, galleryApplicationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "ListByGalleryApplication", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByGalleryApplicationSender(req)
+ if err != nil {
+ result.gavl.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "ListByGalleryApplication", resp, "Failure sending request")
+ return
+ }
+
+ result.gavl, err = client.ListByGalleryApplicationResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "ListByGalleryApplication", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByGalleryApplicationPreparer prepares the ListByGalleryApplication request.
+func (client GalleryApplicationVersionsClient) ListByGalleryApplicationPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "galleryApplicationName": autorest.Encode("path", galleryApplicationName),
+ "galleryName": autorest.Encode("path", galleryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByGalleryApplicationSender sends the ListByGalleryApplication request. The method will close the
+// http.Response Body if it receives an error.
+func (client GalleryApplicationVersionsClient) ListByGalleryApplicationSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByGalleryApplicationResponder handles the response to the ListByGalleryApplication request. The method always
+// closes the http.Response Body.
+func (client GalleryApplicationVersionsClient) ListByGalleryApplicationResponder(resp *http.Response) (result GalleryApplicationVersionList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByGalleryApplicationNextResults retrieves the next set of results, if any.
+func (client GalleryApplicationVersionsClient) listByGalleryApplicationNextResults(ctx context.Context, lastResults GalleryApplicationVersionList) (result GalleryApplicationVersionList, err error) {
+ req, err := lastResults.galleryApplicationVersionListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "listByGalleryApplicationNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByGalleryApplicationSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "listByGalleryApplicationNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByGalleryApplicationResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "listByGalleryApplicationNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByGalleryApplicationComplete enumerates all values, automatically crossing page boundaries as required.
+func (client GalleryApplicationVersionsClient) ListByGalleryApplicationComplete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string) (result GalleryApplicationVersionListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.ListByGalleryApplication")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByGalleryApplication(ctx, resourceGroupName, galleryName, galleryApplicationName)
+ return
+}
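
A comparable sketch for the versions client added in this file, showing the long-running-operation pattern (CreateOrUpdate returns a future that is polled to completion). The property type names `GalleryApplicationVersionPublishingProfile` and `UserArtifactSource`, and every resource name and URL, are assumptions for illustration; only the required fields implied by the validation constraints above (PublishingProfile, Source.FileName, Source.MediaLink) are taken from this diff.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	client := compute.NewGalleryApplicationVersionsClient("<subscription-id>") // placeholder subscription
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()
	version := compute.GalleryApplicationVersion{
		Location: to.StringPtr("westus"),
		GalleryApplicationVersionProperties: &compute.GalleryApplicationVersionProperties{
			// PublishingProfile, Source.FileName and Source.MediaLink are required (see the
			// validation constraints in CreateOrUpdate); the type names used here are assumed.
			PublishingProfile: &compute.GalleryApplicationVersionPublishingProfile{
				Source: &compute.UserArtifactSource{
					FileName:  to.StringPtr("installer.zip"),
					MediaLink: to.StringPtr("https://<storage-account>.blob.core.windows.net/artifacts/installer.zip"),
				},
			},
		},
	}

	// The operation is long-running: wait on the returned future, then fetch the result.
	future, err := client.CreateOrUpdate(ctx, "myResourceGroup", "myGallery", "myApp", "1.0.0", version)
	if err != nil {
		log.Fatal(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	result, err := future.Result(client)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("provisioned gallery application version %s", to.String(result.Name))
}
```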
diff --git a/services/compute/mgmt/2019-07-01/compute/models.go b/services/compute/mgmt/2019-07-01/compute/models.go
index b56b2ae1e597..4ec2778a534e 100644
--- a/services/compute/mgmt/2019-07-01/compute/models.go
+++ b/services/compute/mgmt/2019-07-01/compute/models.go
@@ -270,19 +270,22 @@ func PossibleDiffDiskOptionsValues() []DiffDiskOptions {
type DiskCreateOption string
const (
- // Attach ...
+ // Attach Disk will be attached to a VM.
Attach DiskCreateOption = "Attach"
- // Copy ...
+ // Copy Create a new disk or snapshot by copying from a disk or snapshot specified by the given
+ // sourceResourceId.
Copy DiskCreateOption = "Copy"
- // Empty ...
+ // Empty Create an empty data disk of a size given by diskSizeGB.
Empty DiskCreateOption = "Empty"
- // FromImage ...
+ // FromImage Create a new disk from a platform image specified by the given imageReference.
FromImage DiskCreateOption = "FromImage"
- // Import ...
+ // Import Create a disk by importing from a blob specified by a sourceUri in a storage account specified by
+ // storageAccountId.
Import DiskCreateOption = "Import"
- // Restore ...
+ // Restore Create a new disk by copying from a backup recovery point.
Restore DiskCreateOption = "Restore"
- // Upload ...
+ // Upload Create a new disk by obtaining a write token and using it to directly upload the contents of the
+ // disk.
Upload DiskCreateOption = "Upload"
)
@@ -312,17 +315,17 @@ func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes {
type DiskState string
const (
- // ActiveSAS ...
+ // ActiveSAS The disk currently has an Active SAS Uri associated with it.
ActiveSAS DiskState = "ActiveSAS"
- // ActiveUpload ...
+ // ActiveUpload A disk is created for upload and a write token has been issued for uploading to it.
ActiveUpload DiskState = "ActiveUpload"
- // Attached ...
+ // Attached The disk is currently mounted to a running VM.
Attached DiskState = "Attached"
- // ReadyToUpload ...
+ // ReadyToUpload A disk is ready to be created by upload by requesting a write token.
ReadyToUpload DiskState = "ReadyToUpload"
- // Reserved ...
+ // Reserved The disk is mounted to a stopped-deallocated VM
Reserved DiskState = "Reserved"
- // Unattached ...
+ // Unattached The disk is not being used and can be attached to a VM.
Unattached DiskState = "Unattached"
)
@@ -335,13 +338,17 @@ func PossibleDiskStateValues() []DiskState {
type DiskStorageAccountTypes string
const (
- // PremiumLRS ...
+ // PremiumLRS Premium SSD locally redundant storage. Best for production and performance sensitive
+ // workloads.
PremiumLRS DiskStorageAccountTypes = "Premium_LRS"
- // StandardLRS ...
+ // StandardLRS Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent
+ // access.
StandardLRS DiskStorageAccountTypes = "Standard_LRS"
- // StandardSSDLRS ...
+ // StandardSSDLRS Standard SSD locally redundant storage. Best for web servers, lightly used enterprise
+ // applications and dev/test.
StandardSSDLRS DiskStorageAccountTypes = "StandardSSD_LRS"
- // UltraSSDLRS ...
+ // UltraSSDLRS Ultra SSD locally redundant storage. Best for IO-intensive workloads such as SAP HANA, top
+ // tier databases (for example, SQL, Oracle), and other transaction-heavy workloads.
UltraSSDLRS DiskStorageAccountTypes = "UltraSSD_LRS"
)
@@ -605,6 +612,29 @@ func PossibleProvisioningState2Values() []ProvisioningState2 {
return []ProvisioningState2{ProvisioningState2Creating, ProvisioningState2Deleting, ProvisioningState2Failed, ProvisioningState2Migrating, ProvisioningState2Succeeded, ProvisioningState2Updating}
}
+// ProvisioningState3 enumerates the values for provisioning state 3.
+type ProvisioningState3 string
+
+const (
+ // ProvisioningState3Creating ...
+ ProvisioningState3Creating ProvisioningState3 = "Creating"
+ // ProvisioningState3Deleting ...
+ ProvisioningState3Deleting ProvisioningState3 = "Deleting"
+ // ProvisioningState3Failed ...
+ ProvisioningState3Failed ProvisioningState3 = "Failed"
+ // ProvisioningState3Migrating ...
+ ProvisioningState3Migrating ProvisioningState3 = "Migrating"
+ // ProvisioningState3Succeeded ...
+ ProvisioningState3Succeeded ProvisioningState3 = "Succeeded"
+ // ProvisioningState3Updating ...
+ ProvisioningState3Updating ProvisioningState3 = "Updating"
+)
+
+// PossibleProvisioningState3Values returns an array of possible values for the ProvisioningState3 const type.
+func PossibleProvisioningState3Values() []ProvisioningState3 {
+ return []ProvisioningState3{ProvisioningState3Creating, ProvisioningState3Deleting, ProvisioningState3Failed, ProvisioningState3Migrating, ProvisioningState3Succeeded, ProvisioningState3Updating}
+}
+
// ProximityPlacementGroupType enumerates the values for proximity placement group type.
type ProximityPlacementGroupType string
@@ -771,11 +801,11 @@ func PossibleSettingNamesValues() []SettingNames {
type SnapshotStorageAccountTypes string
const (
- // SnapshotStorageAccountTypesPremiumLRS ...
+ // SnapshotStorageAccountTypesPremiumLRS Premium SSD locally redundant storage
SnapshotStorageAccountTypesPremiumLRS SnapshotStorageAccountTypes = "Premium_LRS"
- // SnapshotStorageAccountTypesStandardLRS ...
+ // SnapshotStorageAccountTypesStandardLRS Standard HDD locally redundant storage
SnapshotStorageAccountTypesStandardLRS SnapshotStorageAccountTypes = "Standard_LRS"
- // SnapshotStorageAccountTypesStandardZRS ...
+ // SnapshotStorageAccountTypesStandardZRS Standard zone redundant storage
SnapshotStorageAccountTypesStandardZRS SnapshotStorageAccountTypes = "Standard_ZRS"
)
@@ -1698,6 +1728,13 @@ func (asu *AvailabilitySetUpdate) UnmarshalJSON(body []byte) error {
return nil
}
+// BillingProfile specifies the billing related details of a low priority VM or VMSS. Minimum
+// api-version: 2019-03-01.
+type BillingProfile struct {
+ // MaxPrice - Specifies the maximum price you are willing to pay for a low priority VM/VMSS. This price is in US Dollars. This price will be compared with the current low priority price for the VM size. Also, the prices are compared at the time of create/update of low priority VM/VMSS and the operation will only succeed if the maxPrice is greater than the current low priority price. The maxPrice will also be used for evicting a low priority VM/VMSS if the current low priority price goes beyond the maxPrice after creation of VM/VMSS. Possible values are: any decimal value greater than zero (example: $0.01538), or -1, which indicates that the price is capped at the on-demand rate. You can set the maxPrice to -1 to indicate that the low priority VM/VMSS should not be evicted for price reasons. Also, the default max price is -1 if it is not provided by you. Minimum api-version: 2019-03-01.
+ MaxPrice *float64 `json:"maxPrice,omitempty"`
+}
+
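
A tiny hedged sketch of the maxPrice sentinel described above: the struct and field come from this diff, while the `to` helper is the standard go-autorest pointer utility.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// -1 keeps the default behaviour: the low priority VM/VMSS is not evicted for
	// price reasons and is billed at up to the current on-demand price; any positive
	// decimal instead sets an explicit ceiling in US Dollars.
	profile := compute.BillingProfile{MaxPrice: to.Float64Ptr(-1)}
	fmt.Printf("maxPrice: %v\n", *profile.MaxPrice)
}
```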
// BootDiagnostics boot Diagnostics is a debugging feature which allows you to view Console Output and
// Screenshot to diagnose VM status. You can easily view the output of your console log.
// Azure also enables you to see a screenshot of the VM from the hypervisor.
@@ -2142,6 +2179,10 @@ type CreationData struct {
SourceURI *string `json:"sourceUri,omitempty"`
// SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk.
SourceResourceID *string `json:"sourceResourceId,omitempty"`
+ // SourceUniqueID - READ-ONLY; If this field is set, this is the unique id identifying the source of this resource.
+ SourceUniqueID *string `json:"sourceUniqueId,omitempty"`
+ // UploadSizeBytes - If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
+ UploadSizeBytes *int64 `json:"uploadSizeBytes,omitempty"`
}
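
As a hedged illustration of the Upload flow described by the DiskCreateOption/DiskState docs and the new UploadSizeBytes field: the `CreateOption` field of CreationData sits above this excerpt and is assumed here, and the size value is simply the documented minimum.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// For createOption Upload no source is supplied; the total size of the upload
	// (VHD contents plus the 512-byte footer) is declared up front, and the data is
	// pushed later via a write token (DiskState ReadyToUpload -> ActiveUpload).
	props := compute.DiskProperties{
		CreationData: &compute.CreationData{
			CreateOption:    compute.Upload,
			UploadSizeBytes: to.Int64Ptr(20972032), // 20 MiB + 512-byte VHD footer, the documented minimum
		},
	}
	fmt.Println(string(props.CreationData.CreateOption), *props.CreationData.UploadSizeBytes)
}
```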
// DataDisk describes a data disk.
@@ -3284,8 +3325,12 @@ type DiskProperties struct {
HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
// CreationData - Disk source information. CreationData information cannot be changed after the disk has been created.
CreationData *CreationData `json:"creationData,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
+ // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+ // DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only.
+ DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"`
+ // UniqueID - READ-ONLY; Unique Guid identifying the resource.
+ UniqueID *string `json:"uniqueId,omitempty"`
// EncryptionSettingsCollection - Encryption settings collection used for Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
// ProvisioningState - READ-ONLY; The disk provisioning state.
@@ -3506,7 +3551,7 @@ func (du *DiskUpdate) UnmarshalJSON(body []byte) error {
type DiskUpdateProperties struct {
// OsType - the Operating System type. Possible values include: 'Windows', 'Linux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
+ // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
// EncryptionSettingsCollection - Encryption settings collection used by Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
@@ -3516,78 +3561,496 @@ type DiskUpdateProperties struct {
DiskMBpsReadWrite *int32 `json:"diskMBpsReadWrite,omitempty"`
}
-// EncryptionSettingsCollection encryption settings for disk or snapshot
-type EncryptionSettingsCollection struct {
- // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged.
- Enabled *bool `json:"enabled,omitempty"`
- // EncryptionSettings - A collection of encryption settings, one for each disk volume.
- EncryptionSettings *[]EncryptionSettingsElement `json:"encryptionSettings,omitempty"`
+// EncryptionSettingsCollection encryption settings for disk or snapshot
+type EncryptionSettingsCollection struct {
+ // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged.
+ Enabled *bool `json:"enabled,omitempty"`
+ // EncryptionSettings - A collection of encryption settings, one for each disk volume.
+ EncryptionSettings *[]EncryptionSettingsElement `json:"encryptionSettings,omitempty"`
+ // EncryptionSettingsVersion - Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption with AAD app. '1.1' corresponds to Azure Disk Encryption.
+ EncryptionSettingsVersion *string `json:"encryptionSettingsVersion,omitempty"`
+}
+
+// EncryptionSettingsElement encryption settings for one disk volume.
+type EncryptionSettingsElement struct {
+ // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key
+ DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"`
+ // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
+ KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"`
+}
+
+// GalleriesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type GalleriesCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *GalleriesCreateOrUpdateFuture) Result(client GalleriesClient) (g Gallery, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent {
+ g, err = client.CreateOrUpdateResponder(g.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type GalleriesDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *GalleriesDeleteFuture) Result(client GalleriesClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// Gallery specifies information about the Shared Image Gallery that you want to create or update.
+type Gallery struct {
+ autorest.Response `json:"-"`
+ *GalleryProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for Gallery.
+func (g Gallery) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if g.GalleryProperties != nil {
+ objectMap["properties"] = g.GalleryProperties
+ }
+ if g.Location != nil {
+ objectMap["location"] = g.Location
+ }
+ if g.Tags != nil {
+ objectMap["tags"] = g.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Gallery struct.
+func (g *Gallery) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var galleryProperties GalleryProperties
+ err = json.Unmarshal(*v, &galleryProperties)
+ if err != nil {
+ return err
+ }
+ g.GalleryProperties = &galleryProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ g.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ g.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ g.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ g.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ g.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// GalleryApplication specifies information about the gallery Application Definition that you want to
+// create or update.
+type GalleryApplication struct {
+ autorest.Response `json:"-"`
+ *GalleryApplicationProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for GalleryApplication.
+func (ga GalleryApplication) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ga.GalleryApplicationProperties != nil {
+ objectMap["properties"] = ga.GalleryApplicationProperties
+ }
+ if ga.Location != nil {
+ objectMap["location"] = ga.Location
+ }
+ if ga.Tags != nil {
+ objectMap["tags"] = ga.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for GalleryApplication struct.
+func (ga *GalleryApplication) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var galleryApplicationProperties GalleryApplicationProperties
+ err = json.Unmarshal(*v, &galleryApplicationProperties)
+ if err != nil {
+ return err
+ }
+ ga.GalleryApplicationProperties = &galleryApplicationProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ ga.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ga.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ga.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ ga.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ ga.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// GalleryApplicationList the List Gallery Applications operation response.
+type GalleryApplicationList struct {
+ autorest.Response `json:"-"`
+ // Value - A list of Gallery Applications.
+ Value *[]GalleryApplication `json:"value,omitempty"`
+ // NextLink - The uri to fetch the next page of Application Definitions in the Application Gallery. Call ListNext() with this to fetch the next page of gallery Application Definitions.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// GalleryApplicationListIterator provides access to a complete listing of GalleryApplication values.
+type GalleryApplicationListIterator struct {
+ i int
+ page GalleryApplicationListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *GalleryApplicationListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *GalleryApplicationListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter GalleryApplicationListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter GalleryApplicationListIterator) Response() GalleryApplicationList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter GalleryApplicationListIterator) Value() GalleryApplication {
+ if !iter.page.NotDone() {
+ return GalleryApplication{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the GalleryApplicationListIterator type.
+func NewGalleryApplicationListIterator(page GalleryApplicationListPage) GalleryApplicationListIterator {
+ return GalleryApplicationListIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (gal GalleryApplicationList) IsEmpty() bool {
+ return gal.Value == nil || len(*gal.Value) == 0
+}
+
+// galleryApplicationListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (gal GalleryApplicationList) galleryApplicationListPreparer(ctx context.Context) (*http.Request, error) {
+ if gal.NextLink == nil || len(to.String(gal.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(gal.NextLink)))
+}
+
+// GalleryApplicationListPage contains a page of GalleryApplication values.
+type GalleryApplicationListPage struct {
+ fn func(context.Context, GalleryApplicationList) (GalleryApplicationList, error)
+ gal GalleryApplicationList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *GalleryApplicationListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.gal)
+ if err != nil {
+ return err
+ }
+ page.gal = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *GalleryApplicationListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page GalleryApplicationListPage) NotDone() bool {
+ return !page.gal.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page GalleryApplicationListPage) Response() GalleryApplicationList {
+ return page.gal
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page GalleryApplicationListPage) Values() []GalleryApplication {
+ if page.gal.IsEmpty() {
+ return nil
+ }
+ return *page.gal.Value
+}
+
+// Creates a new instance of the GalleryApplicationListPage type.
+func NewGalleryApplicationListPage(getNextPage func(context.Context, GalleryApplicationList) (GalleryApplicationList, error)) GalleryApplicationListPage {
+ return GalleryApplicationListPage{fn: getNextPage}
}
-// EncryptionSettingsElement encryption settings for one disk volume.
-type EncryptionSettingsElement struct {
- // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key
- DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"`
- // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
- KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"`
+// GalleryApplicationProperties describes the properties of a gallery Application Definition.
+type GalleryApplicationProperties struct {
+ // Description - The description of this gallery Application Definition resource. This property is updatable.
+ Description *string `json:"description,omitempty"`
+ // Eula - The Eula agreement for the gallery Application Definition.
+ Eula *string `json:"eula,omitempty"`
+ // PrivacyStatementURI - The privacy statement uri.
+ PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"`
+ // ReleaseNoteURI - The release note uri.
+ ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"`
+ // EndOfLifeDate - The end of life date of the gallery Application Definition. This property can be used for decommissioning purposes. This property is updatable.
+ EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
+ // SupportedOSType - This property allows you to specify the supported type of the OS that application is built for. Possible values are: **Windows**, **Linux**. Possible values include: 'Windows', 'Linux'
+ SupportedOSType OperatingSystemTypes `json:"supportedOSType,omitempty"`
}
-// GalleriesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type GalleriesCreateOrUpdateFuture struct {
+// GalleryApplicationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type GalleryApplicationsCreateOrUpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
-func (future *GalleriesCreateOrUpdateFuture) Result(client GalleriesClient) (g Gallery, err error) {
+func (future *GalleryApplicationsCreateOrUpdateFuture) Result(client GalleryApplicationsClient) (ga GalleryApplication, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
- err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture")
+ err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent {
- g, err = client.CreateOrUpdateResponder(g.Response.Response)
+ if ga.Response.Response, err = future.GetResult(sender); err == nil && ga.Response.Response.StatusCode != http.StatusNoContent {
+ ga, err = client.CreateOrUpdateResponder(ga.Response.Response)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request")
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsCreateOrUpdateFuture", "Result", ga.Response.Response, "Failure responding to request")
}
}
return
}
-// GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type GalleriesDeleteFuture struct {
+// GalleryApplicationsDeleteFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type GalleryApplicationsDeleteFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
-func (future *GalleriesDeleteFuture) Result(client GalleriesClient) (ar autorest.Response, err error) {
+func (future *GalleryApplicationsDeleteFuture) Result(client GalleryApplicationsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
- err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure")
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
- err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture")
+ err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsDeleteFuture")
return
}
ar.Response = future.Response()
return
}
-// Gallery specifies information about the Shared Image Gallery that you want to create or update.
-type Gallery struct {
- autorest.Response `json:"-"`
- *GalleryProperties `json:"properties,omitempty"`
+// GalleryApplicationVersion specifies information about the gallery Application Version that you want to
+// create or update.
+type GalleryApplicationVersion struct {
+ autorest.Response `json:"-"`
+ *GalleryApplicationVersionProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
@@ -3600,23 +4063,23 @@ type Gallery struct {
Tags map[string]*string `json:"tags"`
}
-// MarshalJSON is the custom marshaler for Gallery.
-func (g Gallery) MarshalJSON() ([]byte, error) {
+// MarshalJSON is the custom marshaler for GalleryApplicationVersion.
+func (gav GalleryApplicationVersion) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
- if g.GalleryProperties != nil {
- objectMap["properties"] = g.GalleryProperties
+ if gav.GalleryApplicationVersionProperties != nil {
+ objectMap["properties"] = gav.GalleryApplicationVersionProperties
}
- if g.Location != nil {
- objectMap["location"] = g.Location
+ if gav.Location != nil {
+ objectMap["location"] = gav.Location
}
- if g.Tags != nil {
- objectMap["tags"] = g.Tags
+ if gav.Tags != nil {
+ objectMap["tags"] = gav.Tags
}
return json.Marshal(objectMap)
}
-// UnmarshalJSON is the custom unmarshaler for Gallery struct.
-func (g *Gallery) UnmarshalJSON(body []byte) error {
+// UnmarshalJSON is the custom unmarshaler for GalleryApplicationVersion struct.
+func (gav *GalleryApplicationVersion) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
@@ -3626,12 +4089,12 @@ func (g *Gallery) UnmarshalJSON(body []byte) error {
switch k {
case "properties":
if v != nil {
- var galleryProperties GalleryProperties
- err = json.Unmarshal(*v, &galleryProperties)
+ var galleryApplicationVersionProperties GalleryApplicationVersionProperties
+ err = json.Unmarshal(*v, &galleryApplicationVersionProperties)
if err != nil {
return err
}
- g.GalleryProperties = &galleryProperties
+ gav.GalleryApplicationVersionProperties = &galleryApplicationVersionProperties
}
case "id":
if v != nil {
@@ -3640,7 +4103,7 @@ func (g *Gallery) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- g.ID = &ID
+ gav.ID = &ID
}
case "name":
if v != nil {
@@ -3649,7 +4112,7 @@ func (g *Gallery) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- g.Name = &name
+ gav.Name = &name
}
case "type":
if v != nil {
@@ -3658,7 +4121,7 @@ func (g *Gallery) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- g.Type = &typeVar
+ gav.Type = &typeVar
}
case "location":
if v != nil {
@@ -3667,7 +4130,7 @@ func (g *Gallery) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- g.Location = &location
+ gav.Location = &location
}
case "tags":
if v != nil {
@@ -3676,7 +4139,7 @@ func (g *Gallery) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- g.Tags = tags
+ gav.Tags = tags
}
}
}
@@ -3684,10 +4147,254 @@ func (g *Gallery) UnmarshalJSON(body []byte) error {
return nil
}
+// GalleryApplicationVersionList the List Gallery Application version operation response.
+type GalleryApplicationVersionList struct {
+ autorest.Response `json:"-"`
+ // Value - A list of gallery Application Versions.
+ Value *[]GalleryApplicationVersion `json:"value,omitempty"`
+ // NextLink - The uri to fetch the next page of gallery Application Versions. Call ListNext() with this to fetch the next page of gallery Application Versions.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// GalleryApplicationVersionListIterator provides access to a complete listing of GalleryApplicationVersion
+// values.
+type GalleryApplicationVersionListIterator struct {
+ i int
+ page GalleryApplicationVersionListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *GalleryApplicationVersionListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *GalleryApplicationVersionListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter GalleryApplicationVersionListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter GalleryApplicationVersionListIterator) Response() GalleryApplicationVersionList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter GalleryApplicationVersionListIterator) Value() GalleryApplicationVersion {
+ if !iter.page.NotDone() {
+ return GalleryApplicationVersion{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the GalleryApplicationVersionListIterator type.
+func NewGalleryApplicationVersionListIterator(page GalleryApplicationVersionListPage) GalleryApplicationVersionListIterator {
+ return GalleryApplicationVersionListIterator{page: page}
+}
+
+// IsEmpty returns true if the GalleryApplicationVersionList contains no values.
+func (gavl GalleryApplicationVersionList) IsEmpty() bool {
+ return gavl.Value == nil || len(*gavl.Value) == 0
+}
+
+// galleryApplicationVersionListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (gavl GalleryApplicationVersionList) galleryApplicationVersionListPreparer(ctx context.Context) (*http.Request, error) {
+ if gavl.NextLink == nil || len(to.String(gavl.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(gavl.NextLink)))
+}
+
+// GalleryApplicationVersionListPage contains a page of GalleryApplicationVersion values.
+type GalleryApplicationVersionListPage struct {
+ fn func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error)
+ gavl GalleryApplicationVersionList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *GalleryApplicationVersionListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.gavl)
+ if err != nil {
+ return err
+ }
+ page.gavl = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *GalleryApplicationVersionListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page GalleryApplicationVersionListPage) NotDone() bool {
+ return !page.gavl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page GalleryApplicationVersionListPage) Response() GalleryApplicationVersionList {
+ return page.gavl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page GalleryApplicationVersionListPage) Values() []GalleryApplicationVersion {
+ if page.gavl.IsEmpty() {
+ return nil
+ }
+ return *page.gavl.Value
+}
+
+// Creates a new instance of the GalleryApplicationVersionListPage type.
+func NewGalleryApplicationVersionListPage(getNextPage func(context.Context, GalleryApplicationVersionList) (GalleryApplicationVersionList, error)) GalleryApplicationVersionListPage {
+ return GalleryApplicationVersionListPage{fn: getNextPage}
+}
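
A minimal sketch of consuming the new paging types follows. It assumes a `GalleryApplicationVersionsClient` with a `ListByGalleryApplication` method returning a `GalleryApplicationVersionListPage` (the method name and its parameter list are assumptions; only the page/iterator types come from the generated code above, and authorizer setup is omitted).

```go
package samples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
)

// printApplicationVersions walks every page of gallery application versions.
func printApplicationVersions(ctx context.Context, client compute.GalleryApplicationVersionsClient,
	resourceGroup, galleryName, applicationName string) error {
	page, err := client.ListByGalleryApplication(ctx, resourceGroup, galleryName, applicationName) // assumed method
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, version := range page.Values() {
			if version.Name != nil {
				fmt.Println(*version.Name)
			}
		}
		// NextWithContext follows nextLink; once the last page is consumed the
		// page becomes empty, NotDone reports false, and the loop ends.
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```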
+
+// GalleryApplicationVersionProperties describes the properties of a gallery Application Version.
+type GalleryApplicationVersionProperties struct {
+ PublishingProfile *GalleryApplicationVersionPublishingProfile `json:"publishingProfile,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState1Creating', 'ProvisioningState1Updating', 'ProvisioningState1Failed', 'ProvisioningState1Succeeded', 'ProvisioningState1Deleting', 'ProvisioningState1Migrating'
+ ProvisioningState ProvisioningState1 `json:"provisioningState,omitempty"`
+ // ReplicationStatus - READ-ONLY
+ ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"`
+}
+
+// GalleryApplicationVersionPublishingProfile the publishing profile of a gallery Application Version.
+type GalleryApplicationVersionPublishingProfile struct {
+ Source *UserArtifactSource `json:"source,omitempty"`
+ // ContentType - Optional. May be used to help process this file. The type of file contained in the source, e.g. zip, json, etc.
+ ContentType *string `json:"contentType,omitempty"`
+ // EnableHealthCheck - Optional. Whether or not this application reports health.
+ EnableHealthCheck *bool `json:"enableHealthCheck,omitempty"`
+ // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable.
+ TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"`
+ // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
+ ReplicaCount *int32 `json:"replicaCount,omitempty"`
+ // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
+ ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"`
+ // PublishedDate - READ-ONLY; The timestamp for when the gallery Image Version is published.
+ PublishedDate *date.Time `json:"publishedDate,omitempty"`
+ // EndOfLifeDate - The end of life date of the gallery Image Version. This property can be used for decommissioning purposes. This property is updatable.
+ EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
+ // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS'
+ StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
+}
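
The publishing profile is built from the fields declared above; a short sketch, assuming the `to` pointer helpers from go-autorest, with placeholder artifact name and blob URL:

```go
package samples

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// newPublishingProfile builds a GalleryApplicationVersionPublishingProfile
// using only fields declared in the struct above.
func newPublishingProfile() compute.GalleryApplicationVersionPublishingProfile {
	return compute.GalleryApplicationVersionPublishingProfile{
		Source: &compute.UserArtifactSource{
			FileName:  to.StringPtr("myapp.zip"),                                                 // required
			MediaLink: to.StringPtr("https://example.blob.core.windows.net/artifacts/myapp.zip"), // must be a readable storage blob
		},
		ContentType:       to.StringPtr("zip"),
		EnableHealthCheck: to.BoolPtr(false),
		ReplicaCount:      to.Int32Ptr(1),
		ExcludeFromLatest: to.BoolPtr(false),
	}
}
```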
+
+// GalleryApplicationVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results
+// of a long-running operation.
+type GalleryApplicationVersionsCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *GalleryApplicationVersionsCreateOrUpdateFuture) Result(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if gav.Response.Response, err = future.GetResult(sender); err == nil && gav.Response.Response.StatusCode != http.StatusNoContent {
+ gav, err = client.CreateOrUpdateResponder(gav.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsCreateOrUpdateFuture", "Result", gav.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// GalleryApplicationVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type GalleryApplicationVersionsDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *GalleryApplicationVersionsDeleteFuture) Result(client GalleryApplicationVersionsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
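
The futures above follow the standard azure.Future flow: start the operation, block with WaitForCompletionRef, then call Result. A minimal sketch, assuming CreateOrUpdate takes resource group, gallery, application and version names plus the payload (the exact parameter list is an assumption):

```go
package samples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
)

// createApplicationVersion starts the long-running create and blocks until it
// finishes, then unmarshals the final response via Result.
func createApplicationVersion(ctx context.Context, client compute.GalleryApplicationVersionsClient,
	resourceGroup, gallery, application, versionName string,
	payload compute.GalleryApplicationVersion) (compute.GalleryApplicationVersion, error) {
	future, err := client.CreateOrUpdate(ctx, resourceGroup, gallery, application, versionName, payload) // parameters assumed
	if err != nil {
		return compute.GalleryApplicationVersion{}, err
	}
	// Poll until the operation completes or ctx is cancelled.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.GalleryApplicationVersion{}, err
	}
	return future.Result(client)
}
```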
+
// GalleryArtifactPublishingProfileBase describes the basic gallery artifact publishing profile.
type GalleryArtifactPublishingProfileBase struct {
// TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable.
TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"`
+ // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
+ ReplicaCount *int32 `json:"replicaCount,omitempty"`
+ // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
+ ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"`
+ // PublishedDate - READ-ONLY; The timestamp for when the gallery Image Version is published.
+ PublishedDate *date.Time `json:"publishedDate,omitempty"`
+ // EndOfLifeDate - The end of life date of the gallery Image Version. This property can be used for decommissioning purposes. This property is updatable.
+ EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
+ // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS'
+ StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
+}
+
+// GalleryArtifactSource the source image from which the Image Version is going to be created.
+type GalleryArtifactSource struct {
+ ManagedImage *ManagedArtifact `json:"managedImage,omitempty"`
}
// GalleryArtifactVersionSource the gallery artifact version source.
@@ -4000,8 +4707,8 @@ type GalleryImageProperties struct {
Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"`
Disallowed *Disallowed `json:"disallowed,omitempty"`
PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState1Creating', 'ProvisioningState1Updating', 'ProvisioningState1Failed', 'ProvisioningState1Succeeded', 'ProvisioningState1Deleting', 'ProvisioningState1Migrating'
- ProvisioningState ProvisioningState1 `json:"provisioningState,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState2Creating', 'ProvisioningState2Updating', 'ProvisioningState2Failed', 'ProvisioningState2Succeeded', 'ProvisioningState2Deleting', 'ProvisioningState2Migrating'
+ ProvisioningState ProvisioningState2 `json:"provisioningState,omitempty"`
}
// GalleryImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
@@ -4306,8 +5013,8 @@ func NewGalleryImageVersionListPage(getNextPage func(context.Context, GalleryIma
// GalleryImageVersionProperties describes the properties of a gallery Image Version.
type GalleryImageVersionProperties struct {
PublishingProfile *GalleryImageVersionPublishingProfile `json:"publishingProfile,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState2Creating', 'ProvisioningState2Updating', 'ProvisioningState2Failed', 'ProvisioningState2Succeeded', 'ProvisioningState2Deleting', 'ProvisioningState2Migrating'
- ProvisioningState ProvisioningState2 `json:"provisioningState,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState3Creating', 'ProvisioningState3Updating', 'ProvisioningState3Failed', 'ProvisioningState3Succeeded', 'ProvisioningState3Deleting', 'ProvisioningState3Migrating'
+ ProvisioningState ProvisioningState3 `json:"provisioningState,omitempty"`
StorageProfile *GalleryImageVersionStorageProfile `json:"storageProfile,omitempty"`
// ReplicationStatus - READ-ONLY
ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"`
@@ -4315,6 +5022,8 @@ type GalleryImageVersionProperties struct {
// GalleryImageVersionPublishingProfile the publishing profile of a gallery Image Version.
type GalleryImageVersionPublishingProfile struct {
+ // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable.
+ TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"`
// ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable.
ReplicaCount *int32 `json:"replicaCount,omitempty"`
// ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
@@ -4325,8 +5034,6 @@ type GalleryImageVersionPublishingProfile struct {
EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"`
// StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS'
StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
- // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable.
- TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"`
}
// GalleryImageVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
@@ -5374,6 +6081,12 @@ type MaintenanceRedeployStatus struct {
LastOperationMessage *string `json:"lastOperationMessage,omitempty"`
}
+// ManagedArtifact the managed artifact.
+type ManagedArtifact struct {
+ // ID - The managed artifact id.
+ ID *string `json:"id,omitempty"`
+}
+
// ManagedDiskParameters the parameters of a managed disk.
type ManagedDiskParameters struct {
// StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS'
@@ -6587,6 +7300,12 @@ type RunCommandResult struct {
Value *[]InstanceViewStatus `json:"value,omitempty"`
}
+// ScheduledEventsProfile ...
+type ScheduledEventsProfile struct {
+ // TerminateNotificationProfile - Specifies Terminate Scheduled Event related configurations.
+ TerminateNotificationProfile *TerminateNotificationProfile `json:"terminateNotificationProfile,omitempty"`
+}
+
// Sku describes a virtual machine scale set sku.
type Sku struct {
// Name - The sku name.
@@ -6877,12 +7596,18 @@ type SnapshotProperties struct {
HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"`
// CreationData - Disk source information. CreationData information cannot be changed after the disk has been created.
CreationData *CreationData `json:"creationData,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
+ // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+ // DiskSizeBytes - READ-ONLY; The size of the disk in bytes. This field is read only.
+ DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty"`
+ // UniqueID - READ-ONLY; Unique Guid identifying the resource.
+ UniqueID *string `json:"uniqueId,omitempty"`
 // EncryptionSettingsCollection - Encryption settings collection used by Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
// ProvisioningState - READ-ONLY; The disk provisioning state.
ProvisioningState *string `json:"provisioningState,omitempty"`
+ // Incremental - Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed.
+ Incremental *bool `json:"incremental,omitempty"`
}
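
The new Incremental flag is set at snapshot creation time. A minimal sketch follows; the CreationData fields (CreateOption, SourceResourceID) and the compute.Copy constant are assumptions not shown in this hunk.

```go
package samples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// createIncrementalSnapshot copies an existing managed disk into an
// incremental snapshot and waits for the long-running operation.
func createIncrementalSnapshot(ctx context.Context, client compute.SnapshotsClient,
	resourceGroup, snapshotName, sourceDiskID, location string) error {
	snapshot := compute.Snapshot{
		Location: to.StringPtr(location),
		SnapshotProperties: &compute.SnapshotProperties{
			Incremental: to.BoolPtr(true), // new field in this API version
			CreationData: &compute.CreationData{
				CreateOption:     compute.Copy,               // assumed constant
				SourceResourceID: to.StringPtr(sourceDiskID), // assumed field name
			},
		},
	}
	future, err := client.CreateOrUpdate(ctx, resourceGroup, snapshotName, snapshot)
	if err != nil {
		return err
	}
	return future.WaitForCompletionRef(ctx, client.Client)
}
```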
// SnapshotsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
@@ -7095,7 +7820,7 @@ func (su *SnapshotUpdate) UnmarshalJSON(body []byte) error {
type SnapshotUpdateProperties struct {
// OsType - the Operating System type. Possible values include: 'Windows', 'Linux'
OsType OperatingSystemTypes `json:"osType,omitempty"`
- // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
+ // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
 // EncryptionSettingsCollection - Encryption settings collection used by Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
@@ -7155,6 +7880,14 @@ type TargetRegion struct {
StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"`
}
+// TerminateNotificationProfile ...
+type TerminateNotificationProfile struct {
+ // NotBeforeTimeout - Configurable length of time a Virtual Machine being deleted will have to potentially approve the Terminate Scheduled Event before the event is auto approved (timed out). The configuration must be specified in ISO 8601 format; the default value is 5 minutes (PT5M).
+ NotBeforeTimeout *string `json:"notBeforeTimeout,omitempty"`
+ // Enable - Specifies whether the Terminate Scheduled event is enabled or disabled.
+ Enable *bool `json:"enable,omitempty"`
+}
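
ScheduledEventsProfile (introduced above) is wired into the scale set VM profiles later in this change. A small sketch constructing one, assuming the `to` pointer helpers:

```go
package samples

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// terminateNotification enables the Terminate Scheduled Event with a 10 minute
// approval window; the value is later assigned to the ScheduledEventsProfile
// field added to VirtualMachineScaleSetVMProfile in this change.
func terminateNotification() compute.ScheduledEventsProfile {
	return compute.ScheduledEventsProfile{
		TerminateNotificationProfile: &compute.TerminateNotificationProfile{
			Enable:           to.BoolPtr(true),
			NotBeforeTimeout: to.StringPtr("PT10M"), // ISO 8601 duration; default is PT5M
		},
	}
}
```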
+
// ThrottledRequestsInput api request input for LogAnalytics getThrottledRequests Api.
type ThrottledRequestsInput struct {
 // BlobContainerSasURI - SAS URI of the logging blob container to which the LogAnalytics API writes output logs.
@@ -7253,6 +7986,14 @@ type UsageName struct {
LocalizedValue *string `json:"localizedValue,omitempty"`
}
+// UserArtifactSource the source image from which the Image Version is going to be created.
+type UserArtifactSource struct {
+ // FileName - Required. The fileName of the artifact.
+ FileName *string `json:"fileName,omitempty"`
+ // MediaLink - Required. The mediaLink of the artifact, must be a readable storage blob.
+ MediaLink *string `json:"mediaLink,omitempty"`
+}
+
// VaultCertificate describes a single certificate reference in a Key Vault, and where the certificate
// should reside on the VM.
type VaultCertificate struct {
@@ -8021,6 +8762,8 @@ type VirtualMachineImageProperties struct {
OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"`
DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"`
AutomaticOSUpgradeProperties *AutomaticOSUpgradeProperties `json:"automaticOSUpgradeProperties,omitempty"`
+ // HyperVGeneration - Possible values include: 'HyperVGenerationTypesV1', 'HyperVGenerationTypesV2'
+ HyperVGeneration HyperVGenerationTypes `json:"hyperVGeneration,omitempty"`
}
// VirtualMachineImageResource virtual machine image resource information.
@@ -8244,10 +8987,18 @@ type VirtualMachineProperties struct {
NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"`
 // DiagnosticsProfile - Specifies the boot diagnostic settings state. Minimum api-version: 2015-06-15.
 DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
- // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.
+ // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. This property cannot exist along with a non-null properties.virtualMachineScaleSet reference.
 AvailabilitySet *SubResource `json:"availabilitySet,omitempty"`
+ // VirtualMachineScaleSet - Specifies information about the virtual machine scale set that the virtual machine should be assigned to. Virtual machines specified in the same virtual machine scale set are allocated to different nodes to maximize availability. Currently, a VM can only be added to virtual machine scale set at creation time. An existing VM cannot be added to a virtual machine scale set. This property cannot exist along with a non-null properties.availabilitySet reference. Minimum api-version: 2019-03-01
+ VirtualMachineScaleSet *SubResource `json:"virtualMachineScaleSet,omitempty"`
 // ProximityPlacementGroup - Specifies information about the proximity placement group that the virtual machine should be assigned to. Minimum api-version: 2018-04-01.
 ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"`
+ // Priority - Specifies the priority for the virtual machine. Minimum api-version: 2019-03-01. Possible values include: 'Regular', 'Low'
+ Priority VirtualMachinePriorityTypes `json:"priority,omitempty"`
+ // EvictionPolicy - Specifies the eviction policy for the low priority virtual machine. Only supported value is 'Deallocate'. Minimum api-version: 2019-03-01. Possible values include: 'Deallocate', 'Delete'
+ EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"`
+ // BillingProfile - Specifies the billing related details of a low priority virtual machine. Minimum api-version: 2019-03-01.
+ BillingProfile *BillingProfile `json:"billingProfile,omitempty"`
 // Host - Specifies information about the dedicated host that the virtual machine resides in. Minimum api-version: 2018-10-01.
 Host *SubResource `json:"host,omitempty"`
// ProvisioningState - READ-ONLY; The provisioning state, which only appears in the response.
@@ -10537,6 +11288,10 @@ type VirtualMachineScaleSetUpdateVMProfile struct {
ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"`
// LicenseType - The license type, which is for bring your own license scenario.
LicenseType *string `json:"licenseType,omitempty"`
+ // BillingProfile - Specifies the billing related details of a low priority VMSS. Minimum api-version: 2019-03-01.
+ BillingProfile *BillingProfile `json:"billingProfile,omitempty"`
+ // ScheduledEventsProfile - Specifies Scheduled Event related configurations.
+ ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"`
}
// VirtualMachineScaleSetVM describes a virtual machine scale set virtual machine.
@@ -10919,6 +11674,10 @@ type VirtualMachineScaleSetVMProfile struct {
Priority VirtualMachinePriorityTypes `json:"priority,omitempty"`
 // EvictionPolicy - Specifies the eviction policy for virtual machines in a low priority scale set. Minimum api-version: 2017-10-30-preview. Possible values include: 'Deallocate', 'Delete'
 EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"`
+ // BillingProfile - Specifies the billing related details of a low priority VMSS. Minimum api-version: 2019-03-01.
+ BillingProfile *BillingProfile `json:"billingProfile,omitempty"`
+ // ScheduledEventsProfile - Specifies Scheduled Event related configurations.
+ ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"`
}
// VirtualMachineScaleSetVMProperties describes the properties of a virtual machine scale set virtual
diff --git a/services/compute/mgmt/2019-07-01/compute/snapshots.go b/services/compute/mgmt/2019-07-01/compute/snapshots.go
index dc153b65164e..5fca4e7acd7f 100644
--- a/services/compute/mgmt/2019-07-01/compute/snapshots.go
+++ b/services/compute/mgmt/2019-07-01/compute/snapshots.go
@@ -94,7 +94,7 @@ func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resour
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -175,7 +175,7 @@ func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupN
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -258,7 +258,7 @@ func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -337,7 +337,7 @@ func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceG
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -418,7 +418,7 @@ func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -531,7 +531,7 @@ func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, r
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -640,7 +640,7 @@ func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resource
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -718,7 +718,7 @@ func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupN
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-09-30"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go b/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go
index bf227197eb48..011068c7ad89 100644
--- a/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go
+++ b/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go
@@ -204,7 +204,8 @@ func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Respons
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
-func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, err error) {
+// expand - the expand expression to apply on the operation.
+func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand InstanceViewTypes) (result VirtualMachineScaleSetVM, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.Get")
defer func() {
@@ -215,7 +216,7 @@ func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceG
tracing.EndSpan(ctx, sc, err)
}()
}
- req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
+ req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", nil, "Failure preparing request")
return
@@ -237,7 +238,7 @@ func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceG
}
// GetPreparer prepares the Get request.
-func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
+func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand InstanceViewTypes) (*http.Request, error) {
pathParameters := map[string]interface{}{
"instanceId": autorest.Encode("path", instanceID),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
@@ -249,6 +250,9 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, r
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
+ if len(string(expand)) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
preparer := autorest.CreatePreparer(
autorest.AsGet(),
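
The Get signature now takes an expand argument of type InstanceViewTypes; passing the zero value ("") skips the $expand query parameter entirely. The sketch below assumes the compute.InstanceView constant for requesting the instance view.

```go
package samples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
)

// getScaleSetVM fetches a single scale set VM with its instance view expanded.
// compute.InstanceView is an assumed constant of type InstanceViewTypes.
func getScaleSetVM(ctx context.Context, client compute.VirtualMachineScaleSetVMsClient,
	resourceGroup, scaleSetName, instanceID string) (compute.VirtualMachineScaleSetVM, error) {
	return client.Get(ctx, resourceGroup, scaleSetName, instanceID, compute.InstanceView)
}
```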
diff --git a/services/containerregistry/mgmt/2019-05-01/containerregistry/models.go b/services/containerregistry/mgmt/2019-05-01/containerregistry/models.go
index 8ab53a369144..cf551f2a5e17 100644
--- a/services/containerregistry/mgmt/2019-05-01/containerregistry/models.go
+++ b/services/containerregistry/mgmt/2019-05-01/containerregistry/models.go
@@ -2024,7 +2024,7 @@ type OperationPropertiesDefinition struct {
ServiceSpecification *OperationServiceSpecificationDefinition `json:"serviceSpecification,omitempty"`
}
-// OperationServiceSpecificationDefinition the definition of Azure Monitoring metrics list.
+// OperationServiceSpecificationDefinition the definition of Azure Monitoring list.
type OperationServiceSpecificationDefinition struct {
// MetricSpecifications - A list of Azure Monitoring metrics definition.
MetricSpecifications *[]OperationMetricSpecificationDefinition `json:"metricSpecifications,omitempty"`
@@ -3022,7 +3022,7 @@ func (r Resource) MarshalJSON() ([]byte, error) {
// RetentionPolicy the retention policy for a container registry.
type RetentionPolicy struct {
- // Days - The number of days to retain manifest before it expires.
+ // Days - The number of days to retain an untagged manifest after which it gets purged.
Days *int32 `json:"days,omitempty"`
// LastUpdatedTime - READ-ONLY; The timestamp when the policy was last updated.
LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"`
diff --git a/services/containerregistry/mgmt/2019-06-01-preview/containerregistry/models.go b/services/containerregistry/mgmt/2019-06-01-preview/containerregistry/models.go
index f63f572b0722..f3c40cb85d90 100644
--- a/services/containerregistry/mgmt/2019-06-01-preview/containerregistry/models.go
+++ b/services/containerregistry/mgmt/2019-06-01-preview/containerregistry/models.go
@@ -2051,7 +2051,7 @@ type OperationPropertiesDefinition struct {
ServiceSpecification *OperationServiceSpecificationDefinition `json:"serviceSpecification,omitempty"`
}
-// OperationServiceSpecificationDefinition the definition of Azure Monitoring metrics list.
+// OperationServiceSpecificationDefinition the definition of Azure Monitoring list.
type OperationServiceSpecificationDefinition struct {
// MetricSpecifications - A list of Azure Monitoring metrics definition.
MetricSpecifications *[]OperationMetricSpecificationDefinition `json:"metricSpecifications,omitempty"`
@@ -3066,7 +3066,7 @@ func (r Resource) MarshalJSON() ([]byte, error) {
// RetentionPolicy the retention policy for a container registry.
type RetentionPolicy struct {
- // Days - The number of days to retain manifest before it expires.
+ // Days - The number of days to retain an untagged manifest after which it gets purged.
Days *int32 `json:"days,omitempty"`
// LastUpdatedTime - READ-ONLY; The timestamp when the policy was last updated.
LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"`
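
For context on the reworded Days comment, a small sketch constructing the policy in the 2019-06-01-preview package; only the Days field from the struct above is set, and enabling the policy may require additional fields not shown in this hunk.

```go
package samples

import (
	"github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-06-01-preview/containerregistry"
	"github.com/Azure/go-autorest/autorest/to"
)

// retentionPolicy keeps untagged manifests for seven days before they are purged.
func retentionPolicy() containerregistry.RetentionPolicy {
	return containerregistry.RetentionPolicy{
		Days: to.Int32Ptr(7),
	}
}
```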
diff --git a/services/containerservice/mgmt/2019-08-01/containerservice/agentpools.go b/services/containerservice/mgmt/2019-08-01/containerservice/agentpools.go
new file mode 100644
index 000000000000..9a6f1e9c79a6
--- /dev/null
+++ b/services/containerservice/mgmt/2019-08-01/containerservice/agentpools.go
@@ -0,0 +1,623 @@
+package containerservice
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// AgentPoolsClient is the Container Service Client.
+type AgentPoolsClient struct {
+ BaseClient
+}
+
+// NewAgentPoolsClient creates an instance of the AgentPoolsClient client.
+func NewAgentPoolsClient(subscriptionID string) AgentPoolsClient {
+ return NewAgentPoolsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewAgentPoolsClientWithBaseURI creates an instance of the AgentPoolsClient client.
+func NewAgentPoolsClientWithBaseURI(baseURI string, subscriptionID string) AgentPoolsClient {
+ return AgentPoolsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates an agent pool in the specified managed cluster.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+// agentPoolName - the name of the agent pool.
+// parameters - parameters supplied to the Create or Update an agent pool operation.
+func (client AgentPoolsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, parameters AgentPool) (result AgentPoolsCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties.Count", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties.Count", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
+ {Target: "parameters.ManagedClusterAgentPoolProfileProperties.Count", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("containerservice.AgentPoolsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceName, agentPoolName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client AgentPoolsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, parameters AgentPool) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "agentPoolName": autorest.Encode("path", agentPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client AgentPoolsClient) CreateOrUpdateSender(req *http.Request) (future AgentPoolsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client AgentPoolsClient) CreateOrUpdateResponder(resp *http.Response) (result AgentPool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
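
A minimal sketch of driving the new client end to end: only the Count constraint is visible in the validation above, so the example sets Count alone and waits on the returned future (Result is assumed to be the standard generated helper on AgentPoolsCreateOrUpdateFuture).

```go
package samples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/to"
)

// createAgentPool creates (or updates) a three-node pool and waits for the
// long-running operation. Real pools will also set fields such as the VM size,
// which are omitted here.
func createAgentPool(ctx context.Context, client containerservice.AgentPoolsClient,
	resourceGroup, clusterName, poolName string) (containerservice.AgentPool, error) {
	pool := containerservice.AgentPool{
		ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
			Count: to.Int32Ptr(3), // must be between 1 and 100 per the validation rules
		},
	}
	future, err := client.CreateOrUpdate(ctx, resourceGroup, clusterName, poolName, pool)
	if err != nil {
		return containerservice.AgentPool{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return containerservice.AgentPool{}, err
	}
	return future.Result(client) // assumed generated helper
}
```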
+
+// Delete deletes the agent pool in the specified managed cluster.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+// agentPoolName - the name of the agent pool.
+func (client AgentPoolsClient) Delete(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result AgentPoolsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.AgentPoolsClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName, agentPoolName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client AgentPoolsClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "agentPoolName": autorest.Encode("path", agentPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client AgentPoolsClient) DeleteSender(req *http.Request) (future AgentPoolsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client AgentPoolsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the details of the agent pool by managed cluster and resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+// agentPoolName - the name of the agent pool.
+func (client AgentPoolsClient) Get(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result AgentPool, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.AgentPoolsClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, resourceName, agentPoolName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client AgentPoolsClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "agentPoolName": autorest.Encode("path", agentPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client AgentPoolsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client AgentPoolsClient) GetResponder(resp *http.Response) (result AgentPool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAvailableAgentPoolVersions gets a list of supported versions for the specified agent pool.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+// agentPoolName - the name of the agent pool.
+func (client AgentPoolsClient) GetAvailableAgentPoolVersions(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result AgentPoolAvailableVersions, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.GetAvailableAgentPoolVersions")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", err.Error())
+ }
+
+ req, err := client.GetAvailableAgentPoolVersionsPreparer(ctx, resourceGroupName, resourceName, agentPoolName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAvailableAgentPoolVersionsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAvailableAgentPoolVersionsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAvailableAgentPoolVersionsPreparer prepares the GetAvailableAgentPoolVersions request.
+func (client AgentPoolsClient) GetAvailableAgentPoolVersionsPreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "agentPoolName": autorest.Encode("path", agentPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/availableAgentPoolVersions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAvailableAgentPoolVersionsSender sends the GetAvailableAgentPoolVersions request. The method will close the
+// http.Response Body if it receives an error.
+func (client AgentPoolsClient) GetAvailableAgentPoolVersionsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAvailableAgentPoolVersionsResponder handles the response to the GetAvailableAgentPoolVersions request. The method always
+// closes the http.Response Body.
+func (client AgentPoolsClient) GetAvailableAgentPoolVersionsResponder(resp *http.Response) (result AgentPoolAvailableVersions, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetUpgradeProfile gets the details of the upgrade profile for an agent pool with a specified resource group and
+// managed cluster name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+// agentPoolName - the name of the agent pool.
+func (client AgentPoolsClient) GetUpgradeProfile(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result AgentPoolUpgradeProfile, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.GetUpgradeProfile")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.AgentPoolsClient", "GetUpgradeProfile", err.Error())
+ }
+
+ req, err := client.GetUpgradeProfilePreparer(ctx, resourceGroupName, resourceName, agentPoolName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetUpgradeProfile", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetUpgradeProfileSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetUpgradeProfile", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetUpgradeProfileResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetUpgradeProfile", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetUpgradeProfilePreparer prepares the GetUpgradeProfile request.
+func (client AgentPoolsClient) GetUpgradeProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "agentPoolName": autorest.Encode("path", agentPoolName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetUpgradeProfileSender sends the GetUpgradeProfile request. The method will close the
+// http.Response Body if it receives an error.
+func (client AgentPoolsClient) GetUpgradeProfileSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetUpgradeProfileResponder handles the response to the GetUpgradeProfile request. The method always
+// closes the http.Response Body.
+func (client AgentPoolsClient) GetUpgradeProfileResponder(resp *http.Response) (result AgentPoolUpgradeProfile, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
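For reference, a minimal sketch of calling the new agent-pool upgrade-profile operation from application code. The subscription ID, resource group, cluster, and pool names are placeholders, and the environment-based authorizer is an assumption about how credentials are supplied; it is not part of the generated file above.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Credentials are assumed to be available via environment variables
	// (client ID/secret, certificate, or MSI).
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := containerservice.NewAgentPoolsClient("<subscription-id>")
	client.Authorizer = authorizer

	// GetUpgradeProfile validates resourceName client-side (1-63 characters,
	// matching the pattern shown above) before issuing the GET.
	profile, err := client.GetUpgradeProfile(context.Background(), "myResourceGroup", "myCluster", "agentpool1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("HTTP status:", profile.Response.StatusCode)
}
```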
+// List gets a list of agent pools in the specified managed cluster. The operation returns properties of each agent
+// pool.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+func (client AgentPoolsClient) List(ctx context.Context, resourceGroupName string, resourceName string) (result AgentPoolListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.List")
+ defer func() {
+ sc := -1
+ if result.aplr.Response.Response != nil {
+ sc = result.aplr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.AgentPoolsClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, resourceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.aplr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.aplr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client AgentPoolsClient) ListPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client AgentPoolsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client AgentPoolsClient) ListResponder(resp *http.Response) (result AgentPoolListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client AgentPoolsClient) listNextResults(ctx context.Context, lastResults AgentPoolListResult) (result AgentPoolListResult, err error) {
+ req, err := lastResults.agentPoolListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AgentPoolsClient) ListComplete(ctx context.Context, resourceGroupName string, resourceName string) (result AgentPoolListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, resourceName)
+ return
+}
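The List/ListComplete pair follows the SDK's usual paging pattern: List returns the first AgentPoolListResultPage, while ListComplete wraps it in an iterator that fetches further pages on demand. Below is a sketch of walking every pool with the iterator; printAgentPools is a hypothetical helper, and the NotDone/Value/NextWithContext methods are assumed to be the standard generated iterator surface.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
)

// printAgentPools prints the name of every agent pool in the cluster,
// crossing page boundaries transparently via the iterator.
func printAgentPools(ctx context.Context, client containerservice.AgentPoolsClient, resourceGroup, cluster string) error {
	iter, err := client.ListComplete(ctx, resourceGroup, cluster)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		pool := iter.Value() // containerservice.AgentPool
		if pool.Name != nil {
			fmt.Println(*pool.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```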
diff --git a/services/containerservice/mgmt/2019-08-01/containerservice/client.go b/services/containerservice/mgmt/2019-08-01/containerservice/client.go
new file mode 100644
index 000000000000..09a36def717c
--- /dev/null
+++ b/services/containerservice/mgmt/2019-08-01/containerservice/client.go
@@ -0,0 +1,51 @@
+// Package containerservice implements the Azure ARM Containerservice service API.
+//
+// The Container Service Client.
+package containerservice
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Containerservice
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Containerservice.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
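client.go only defines the shared BaseClient; the operation-specific clients in this package embed it. The sketch below shows the two constructors side by side. newBaseClient is a hypothetical helper, the sovereign-cloud endpoint is purely an illustrative value, and credentials are assumed to come from the environment.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

// newBaseClient returns a BaseClient wired against either the public-cloud
// default (DefaultBaseURI) or an alternate ARM endpoint.
func newBaseClient(subscriptionID, customEndpoint string) (containerservice.BaseClient, error) {
	var client containerservice.BaseClient
	if customEndpoint == "" {
		client = containerservice.New(subscriptionID)
	} else {
		// e.g. "https://management.usgovcloudapi.net" for Azure Government.
		client = containerservice.NewWithBaseURI(customEndpoint, subscriptionID)
	}
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		return containerservice.BaseClient{}, err
	}
	client.Authorizer = authorizer
	return client, nil
}
```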
diff --git a/services/containerservice/mgmt/2019-08-01/containerservice/containerserviceapi/interfaces.go b/services/containerservice/mgmt/2019-08-01/containerservice/containerserviceapi/interfaces.go
new file mode 100644
index 000000000000..562e17423989
--- /dev/null
+++ b/services/containerservice/mgmt/2019-08-01/containerservice/containerserviceapi/interfaces.go
@@ -0,0 +1,84 @@
+package containerserviceapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
+)
+
+// OpenShiftManagedClustersClientAPI contains the set of methods on the OpenShiftManagedClustersClient type.
+type OpenShiftManagedClustersClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.OpenShiftManagedCluster) (result containerservice.OpenShiftManagedClustersCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.OpenShiftManagedClustersDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.OpenShiftManagedCluster, err error)
+ List(ctx context.Context) (result containerservice.OpenShiftManagedClusterListResultPage, err error)
+ ListByResourceGroup(ctx context.Context, resourceGroupName string) (result containerservice.OpenShiftManagedClusterListResultPage, err error)
+ UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.TagsObject) (result containerservice.OpenShiftManagedClustersUpdateTagsFuture, err error)
+}
+
+var _ OpenShiftManagedClustersClientAPI = (*containerservice.OpenShiftManagedClustersClient)(nil)
+
+// ContainerServicesClientAPI contains the set of methods on the ContainerServicesClient type.
+type ContainerServicesClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, containerServiceName string, parameters containerservice.ContainerService) (result containerservice.ContainerServicesCreateOrUpdateFutureType, err error)
+ Delete(ctx context.Context, resourceGroupName string, containerServiceName string) (result containerservice.ContainerServicesDeleteFutureType, err error)
+ Get(ctx context.Context, resourceGroupName string, containerServiceName string) (result containerservice.ContainerService, err error)
+ List(ctx context.Context) (result containerservice.ListResultPage, err error)
+ ListByResourceGroup(ctx context.Context, resourceGroupName string) (result containerservice.ListResultPage, err error)
+ ListOrchestrators(ctx context.Context, location string, resourceType string) (result containerservice.OrchestratorVersionProfileListResult, err error)
+}
+
+var _ ContainerServicesClientAPI = (*containerservice.ContainerServicesClient)(nil)
+
+// OperationsClientAPI contains the set of methods on the OperationsClient type.
+type OperationsClientAPI interface {
+ List(ctx context.Context) (result containerservice.OperationListResult, err error)
+}
+
+var _ OperationsClientAPI = (*containerservice.OperationsClient)(nil)
+
+// ManagedClustersClientAPI contains the set of methods on the ManagedClustersClient type.
+type ManagedClustersClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.ManagedCluster) (result containerservice.ManagedClustersCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.ManagedClustersDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.ManagedCluster, err error)
+ GetAccessProfile(ctx context.Context, resourceGroupName string, resourceName string, roleName string) (result containerservice.ManagedClusterAccessProfile, err error)
+ GetUpgradeProfile(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.ManagedClusterUpgradeProfile, err error)
+ List(ctx context.Context) (result containerservice.ManagedClusterListResultPage, err error)
+ ListByResourceGroup(ctx context.Context, resourceGroupName string) (result containerservice.ManagedClusterListResultPage, err error)
+ ListClusterAdminCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.CredentialResults, err error)
+ ListClusterUserCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.CredentialResults, err error)
+ ResetAADProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.ManagedClusterAADProfile) (result containerservice.ManagedClustersResetAADProfileFuture, err error)
+ ResetServicePrincipalProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.ManagedClusterServicePrincipalProfile) (result containerservice.ManagedClustersResetServicePrincipalProfileFuture, err error)
+ UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters containerservice.TagsObject) (result containerservice.ManagedClustersUpdateTagsFuture, err error)
+}
+
+var _ ManagedClustersClientAPI = (*containerservice.ManagedClustersClient)(nil)
+
+// AgentPoolsClientAPI contains the set of methods on the AgentPoolsClient type.
+type AgentPoolsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, parameters containerservice.AgentPool) (result containerservice.AgentPoolsCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result containerservice.AgentPoolsDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result containerservice.AgentPool, err error)
+ GetAvailableAgentPoolVersions(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result containerservice.AgentPoolAvailableVersions, err error)
+ GetUpgradeProfile(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result containerservice.AgentPoolUpgradeProfile, err error)
+ List(ctx context.Context, resourceGroupName string, resourceName string) (result containerservice.AgentPoolListResultPage, err error)
+}
+
+var _ AgentPoolsClientAPI = (*containerservice.AgentPoolsClient)(nil)
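The containerserviceapi interfaces let calling code depend on the operation surface rather than the concrete clients, which makes it straightforward to substitute fakes in tests. A sketch of a hypothetical helper that accepts AgentPoolsClientAPI; the page methods Values/NotDone/NextWithContext are assumed to be the standard generated paging surface.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice/containerserviceapi"
)

// poolNames collects agent pool names through the interface, so a unit test
// can pass a hand-written fake instead of a real AgentPoolsClient.
func poolNames(ctx context.Context, pools containerserviceapi.AgentPoolsClientAPI, resourceGroup, cluster string) ([]string, error) {
	page, err := pools.List(ctx, resourceGroup, cluster)
	if err != nil {
		return nil, err
	}
	var names []string
	for page.NotDone() {
		for _, p := range page.Values() {
			if p.Name != nil {
				names = append(names, *p.Name)
			}
		}
		if err := page.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return names, nil
}
```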
diff --git a/services/containerservice/mgmt/2019-08-01/containerservice/containerservices.go b/services/containerservice/mgmt/2019-08-01/containerservice/containerservices.go
new file mode 100644
index 000000000000..1bd2a37eec45
--- /dev/null
+++ b/services/containerservice/mgmt/2019-08-01/containerservice/containerservices.go
@@ -0,0 +1,622 @@
+package containerservice
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ContainerServicesClient is the Container Service Client.
+type ContainerServicesClient struct {
+ BaseClient
+}
+
+// NewContainerServicesClient creates an instance of the ContainerServicesClient client.
+func NewContainerServicesClient(subscriptionID string) ContainerServicesClient {
+ return NewContainerServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewContainerServicesClientWithBaseURI creates an instance of the ContainerServicesClient client.
+func NewContainerServicesClientWithBaseURI(baseURI string, subscriptionID string) ContainerServicesClient {
+ return ContainerServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a container service with the specified configuration of orchestrator, masters, and
+// agents.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// containerServiceName - the name of the container service in the specified subscription and resource group.
+// parameters - parameters supplied to the Create or Update a Container Service operation.
+func (client ContainerServicesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (result ContainerServicesCreateOrUpdateFutureType, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.OrchestratorProfile", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.Properties.CustomProfile", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.CustomProfile.Orchestrator", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.Properties.ServicePrincipalProfile", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.ServicePrincipalProfile.ClientID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.Properties.ServicePrincipalProfile.KeyVaultSecretRef", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.ServicePrincipalProfile.KeyVaultSecretRef.VaultID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.Properties.ServicePrincipalProfile.KeyVaultSecretRef.SecretName", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ {Target: "parameters.Properties.MasterProfile", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.MasterProfile.DNSPrefix", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.Properties.WindowsProfile", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.WindowsProfile.AdminUsername", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.WindowsProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$`, Chain: nil}}},
+ {Target: "parameters.Properties.WindowsProfile.AdminPassword", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ {Target: "parameters.Properties.LinuxProfile", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.LinuxProfile.AdminUsername", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.LinuxProfile.AdminUsername", Name: validation.Pattern, Rule: `^[A-Za-z][-A-Za-z0-9_]*$`, Chain: nil}}},
+ {Target: "parameters.Properties.LinuxProfile.SSH", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.LinuxProfile.SSH.PublicKeys", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "parameters.Properties.DiagnosticsProfile", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.DiagnosticsProfile.VMDiagnostics", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.Properties.DiagnosticsProfile.VMDiagnostics.Enabled", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("containerservice.ContainerServicesClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, containerServiceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ContainerServicesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "containerServiceName": autorest.Encode("path", containerServiceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (future ContainerServicesCreateOrUpdateFutureType, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ContainerServicesClient) CreateOrUpdateResponder(resp *http.Response) (result ContainerService, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
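CreateOrUpdate is a long-running operation: the Sender converts the initial PUT response into a future rather than a final result. The sketch below shows the usual blocking pattern; createContainerService is a hypothetical helper, WaitForCompletionRef and the future's Result method are assumed to follow the standard go-autorest generated-future mechanics, and the parameters value is assumed to satisfy the validation constraints enforced above.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
)

// createContainerService starts the PUT and polls until ARM reports a
// terminal state, then extracts the final ContainerService from the future.
func createContainerService(ctx context.Context, client containerservice.ContainerServicesClient, resourceGroup, name string, parameters containerservice.ContainerService) error {
	future, err := client.CreateOrUpdate(ctx, resourceGroup, name, parameters)
	if err != nil {
		return err
	}
	// Polling frequency and timeout come from the embedded autorest.Client.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	cs, err := future.Result(client)
	if err != nil {
		return err
	}
	if cs.Name != nil {
		fmt.Println("provisioned:", *cs.Name)
	}
	return nil
}
```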
+// Delete deletes the specified container service in the specified subscription and resource group. The operation does
+// not delete other resources created as part of creating a container service, including storage accounts, VMs, and
+// availability sets. All the other resources created with the container service are part of the same resource group
+// and can be deleted individually.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// containerServiceName - the name of the container service in the specified subscription and resource group.
+func (client ContainerServicesClient) Delete(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerServicesDeleteFutureType, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, containerServiceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ContainerServicesClient) DeletePreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "containerServiceName": autorest.Encode("path", containerServiceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ContainerServicesClient) DeleteSender(req *http.Request) (future ContainerServicesDeleteFutureType, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ContainerServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the properties of the specified container service in the specified subscription and resource group. The
+// operation returns the properties including state, orchestrator, number of masters and agents, and FQDNs of masters
+// and agents.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// containerServiceName - the name of the container service in the specified subscription and resource group.
+func (client ContainerServicesClient) Get(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerService, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, containerServiceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ContainerServicesClient) GetPreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "containerServiceName": autorest.Encode("path", containerServiceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ContainerServicesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ContainerServicesClient) GetResponder(resp *http.Response) (result ContainerService, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets a list of container services in the specified subscription. The operation returns properties of each
+// container service including state, orchestrator, number of masters and agents, and FQDNs of masters and agents.
+func (client ContainerServicesClient) List(ctx context.Context) (result ListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.List")
+ defer func() {
+ sc := -1
+ if result.lr.Response.Response != nil {
+ sc = result.lr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.lr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.lr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ContainerServicesClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/containerServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ContainerServicesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ContainerServicesClient) ListResponder(resp *http.Response) (result ListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client ContainerServicesClient) listNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) {
+ req, err := lastResults.listResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ContainerServicesClient) ListComplete(ctx context.Context) (result ListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
+
+// ListByResourceGroup gets a list of container services in the specified subscription and resource group. The
+// operation returns properties of each container service including state, orchestrator, number of masters and agents,
+// and FQDNs of masters and agents.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+func (client ContainerServicesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.lr.Response.Response != nil {
+ sc = result.lr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByResourceGroupNextResults
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.lr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.lr, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client ContainerServicesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client ContainerServicesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client ContainerServicesClient) ListByResourceGroupResponder(resp *http.Response) (result ListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client ContainerServicesClient) listByResourceGroupNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) {
+ req, err := lastResults.listResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ContainerServicesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
+ return
+}
+
+// ListOrchestrators gets a list of supported orchestrators in the specified subscription. The operation returns
+// properties of each orchestrator, including version, available upgrades, and whether that version or its upgrades are
+// in preview.
+// Parameters:
+// location - the name of a supported Azure region.
+// resourceType - the resource type for which to return the list of orchestrators.
+func (client ContainerServicesClient) ListOrchestrators(ctx context.Context, location string, resourceType string) (result OrchestratorVersionProfileListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ContainerServicesClient.ListOrchestrators")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListOrchestratorsPreparer(ctx, location, resourceType)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListOrchestrators", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListOrchestratorsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListOrchestrators", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListOrchestratorsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesClient", "ListOrchestrators", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListOrchestratorsPreparer prepares the ListOrchestrators request.
+func (client ContainerServicesClient) ListOrchestratorsPreparer(ctx context.Context, location string, resourceType string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "location": autorest.Encode("path", location),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(resourceType) > 0 {
+ queryParameters["resource-type"] = autorest.Encode("query", resourceType)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/orchestrators", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListOrchestratorsSender sends the ListOrchestrators request. The method will close the
+// http.Response Body if it receives an error.
+func (client ContainerServicesClient) ListOrchestratorsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListOrchestratorsResponder handles the response to the ListOrchestrators request. The method always
+// closes the http.Response Body.
+func (client ContainerServicesClient) ListOrchestratorsResponder(resp *http.Response) (result OrchestratorVersionProfileListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
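A sketch of calling ListOrchestrators: passing an empty resourceType omits the optional resource-type filter (see the preparer above), and the result is simply dumped as JSON so that no assumptions are made about the profile model's field names. printOrchestrators is a hypothetical helper.

```go
package example

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
)

// printOrchestrators lists the orchestrator versions available in a region
// and pretty-prints the raw result.
func printOrchestrators(ctx context.Context, client containerservice.ContainerServicesClient, location string) error {
	result, err := client.ListOrchestrators(ctx, location, "") // "" => no resource-type filter
	if err != nil {
		return err
	}
	b, err := json.MarshalIndent(result, "", "  ")
	if err != nil {
		return err
	}
	fmt.Println(string(b))
	return nil
}
```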
diff --git a/services/containerservice/mgmt/2019-08-01/containerservice/managedclusters.go b/services/containerservice/mgmt/2019-08-01/containerservice/managedclusters.go
new file mode 100644
index 000000000000..fc526da89452
--- /dev/null
+++ b/services/containerservice/mgmt/2019-08-01/containerservice/managedclusters.go
@@ -0,0 +1,1201 @@
+package containerservice
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ManagedClustersClient is the Container Service Client.
+type ManagedClustersClient struct {
+ BaseClient
+}
+
+// NewManagedClustersClient creates an instance of the ManagedClustersClient client.
+func NewManagedClustersClient(subscriptionID string) ManagedClustersClient {
+ return NewManagedClustersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewManagedClustersClientWithBaseURI creates an instance of the ManagedClustersClient client.
+func NewManagedClustersClientWithBaseURI(baseURI string, subscriptionID string) ManagedClustersClient {
+ return ManagedClustersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a managed cluster with the specified configuration for agents and Kubernetes
+// version.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+// parameters - parameters supplied to the Create or Update a Managed Cluster operation.
+func (client ManagedClustersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedCluster) (result ManagedClustersCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.ManagedClusterProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile.AdminUsername", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile.AdminUsername", Name: validation.Pattern, Rule: `^[A-Za-z][-A-Za-z0-9_]*$`, Chain: nil}}},
+ {Target: "parameters.ManagedClusterProperties.LinuxProfile.SSH", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile.SSH.PublicKeys", Name: validation.Null, Rule: true, Chain: nil}}},
+ }},
+ {Target: "parameters.ManagedClusterProperties.WindowsProfile", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.WindowsProfile.AdminUsername", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.WindowsProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$`, Chain: nil}}},
+ }},
+ {Target: "parameters.ManagedClusterProperties.ServicePrincipalProfile", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.ServicePrincipalProfile.ClientID", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.ManagedClusterProperties.NetworkProfile", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.PodCidr", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.PodCidr", Name: validation.Pattern, Rule: `^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$`, Chain: nil}}},
+ {Target: "parameters.ManagedClusterProperties.NetworkProfile.ServiceCidr", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.ServiceCidr", Name: validation.Pattern, Rule: `^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$`, Chain: nil}}},
+ {Target: "parameters.ManagedClusterProperties.NetworkProfile.DNSServiceIP", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.DNSServiceIP", Name: validation.Pattern, Rule: `^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$`, Chain: nil}}},
+ {Target: "parameters.ManagedClusterProperties.NetworkProfile.DockerBridgeCidr", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.DockerBridgeCidr", Name: validation.Pattern, Rule: `^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$`, Chain: nil}}},
+ {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.Count", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.Count", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
+ {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.Count", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }},
+ }},
+ }},
+ }},
+ {Target: "parameters.ManagedClusterProperties.AadProfile", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.AadProfile.ClientAppID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ManagedClusterProperties.AadProfile.ServerAppID", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("containerservice.ManagedClustersClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ManagedClustersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedCluster) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) CreateOrUpdateSender(req *http.Request) (future ManagedClustersCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) CreateOrUpdateResponder(resp *http.Response) (result ManagedCluster, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes the managed cluster with a specified resource group and name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+func (client ManagedClustersClient) Delete(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedClustersDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.ManagedClustersClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ManagedClustersClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) DeleteSender(req *http.Request) (future ManagedClustersDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the details of the managed cluster with a specified resource group and name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+func (client ManagedClustersClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedCluster, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.ManagedClustersClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, resourceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ManagedClustersClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) GetResponder(resp *http.Response) (result ManagedCluster, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAccessProfile gets the accessProfile for the specified role name of the managed cluster with a specified resource
+// group and name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+// roleName - the name of the role for managed cluster accessProfile resource.
+func (client ManagedClustersClient) GetAccessProfile(ctx context.Context, resourceGroupName string, resourceName string, roleName string) (result ManagedClusterAccessProfile, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.GetAccessProfile")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.ManagedClustersClient", "GetAccessProfile", err.Error())
+ }
+
+ req, err := client.GetAccessProfilePreparer(ctx, resourceGroupName, resourceName, roleName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetAccessProfile", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAccessProfileSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetAccessProfile", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAccessProfileResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetAccessProfile", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAccessProfilePreparer prepares the GetAccessProfile request.
+func (client ManagedClustersClient) GetAccessProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, roleName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "roleName": autorest.Encode("path", roleName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAccessProfileSender sends the GetAccessProfile request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) GetAccessProfileSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAccessProfileResponder handles the response to the GetAccessProfile request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) GetAccessProfileResponder(resp *http.Response) (result ManagedClusterAccessProfile, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetUpgradeProfile gets the details of the upgrade profile for a managed cluster with a specified resource group and
+// name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+func (client ManagedClustersClient) GetUpgradeProfile(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedClusterUpgradeProfile, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.GetUpgradeProfile")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.ManagedClustersClient", "GetUpgradeProfile", err.Error())
+ }
+
+ req, err := client.GetUpgradeProfilePreparer(ctx, resourceGroupName, resourceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetUpgradeProfile", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetUpgradeProfileSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetUpgradeProfile", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetUpgradeProfileResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetUpgradeProfile", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetUpgradeProfilePreparer prepares the GetUpgradeProfile request.
+func (client ManagedClustersClient) GetUpgradeProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetUpgradeProfileSender sends the GetUpgradeProfile request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) GetUpgradeProfileSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetUpgradeProfileResponder handles the response to the GetUpgradeProfile request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) GetUpgradeProfileResponder(resp *http.Response) (result ManagedClusterUpgradeProfile, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets a list of managed clusters in the specified subscription. The operation returns properties of each managed
+// cluster.
+func (client ManagedClustersClient) List(ctx context.Context) (result ManagedClusterListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.List")
+ defer func() {
+ sc := -1
+ if result.mclr.Response.Response != nil {
+ sc = result.mclr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.mclr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.mclr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ManagedClustersClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) ListResponder(resp *http.Response) (result ManagedClusterListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client ManagedClustersClient) listNextResults(ctx context.Context, lastResults ManagedClusterListResult) (result ManagedClusterListResult, err error) {
+ req, err := lastResults.managedClusterListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ManagedClustersClient) ListComplete(ctx context.Context) (result ManagedClusterListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
+
+// ListByResourceGroup lists managed clusters in the specified subscription and resource group. The operation returns
+// properties of each managed cluster.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+func (client ManagedClustersClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ManagedClusterListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.mclr.Response.Response != nil {
+ sc = result.mclr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.ManagedClustersClient", "ListByResourceGroup", err.Error())
+ }
+
+ result.fn = client.listByResourceGroupNextResults
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.mclr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.mclr, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client ManagedClustersClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) ListByResourceGroupResponder(resp *http.Response) (result ManagedClusterListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client ManagedClustersClient) listByResourceGroupNextResults(ctx context.Context, lastResults ManagedClusterListResult) (result ManagedClusterListResult, err error) {
+ req, err := lastResults.managedClusterListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ManagedClustersClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ManagedClusterListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
+ return
+}
+
+// ListClusterAdminCredentials gets the cluster admin credentials of the managed cluster with a specified resource
+// group and name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+func (client ManagedClustersClient) ListClusterAdminCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result CredentialResults, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListClusterAdminCredentials")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.ManagedClustersClient", "ListClusterAdminCredentials", err.Error())
+ }
+
+ req, err := client.ListClusterAdminCredentialsPreparer(ctx, resourceGroupName, resourceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterAdminCredentials", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListClusterAdminCredentialsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterAdminCredentials", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListClusterAdminCredentialsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterAdminCredentials", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListClusterAdminCredentialsPreparer prepares the ListClusterAdminCredentials request.
+func (client ManagedClustersClient) ListClusterAdminCredentialsPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListClusterAdminCredentialsSender sends the ListClusterAdminCredentials request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) ListClusterAdminCredentialsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListClusterAdminCredentialsResponder handles the response to the ListClusterAdminCredentials request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) ListClusterAdminCredentialsResponder(resp *http.Response) (result CredentialResults, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListClusterUserCredentials gets the cluster user credentials of the managed cluster with a specified resource
+// group and name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+func (client ManagedClustersClient) ListClusterUserCredentials(ctx context.Context, resourceGroupName string, resourceName string) (result CredentialResults, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListClusterUserCredentials")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.ManagedClustersClient", "ListClusterUserCredentials", err.Error())
+ }
+
+ req, err := client.ListClusterUserCredentialsPreparer(ctx, resourceGroupName, resourceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterUserCredentials", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListClusterUserCredentialsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterUserCredentials", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListClusterUserCredentialsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterUserCredentials", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListClusterUserCredentialsPreparer prepares the ListClusterUserCredentials request.
+func (client ManagedClustersClient) ListClusterUserCredentialsPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListClusterUserCredentialsSender sends the ListClusterUserCredentials request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) ListClusterUserCredentialsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListClusterUserCredentialsResponder handles the response to the ListClusterUserCredentials request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) ListClusterUserCredentialsResponder(resp *http.Response) (result CredentialResults, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ResetAADProfile updates the AAD Profile for a managed cluster.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+// parameters - parameters supplied to the Reset AAD Profile operation for a Managed Cluster.
+func (client ManagedClustersClient) ResetAADProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterAADProfile) (result ManagedClustersResetAADProfileFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ResetAADProfile")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.ClientAppID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ServerAppID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.ManagedClustersClient", "ResetAADProfile", err.Error())
+ }
+
+ req, err := client.ResetAADProfilePreparer(ctx, resourceGroupName, resourceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetAADProfile", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.ResetAADProfileSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetAADProfile", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// ResetAADProfilePreparer prepares the ResetAADProfile request.
+func (client ManagedClustersClient) ResetAADProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterAADProfile) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ResetAADProfileSender sends the ResetAADProfile request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) ResetAADProfileSender(req *http.Request) (future ManagedClustersResetAADProfileFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// ResetAADProfileResponder handles the response to the ResetAADProfile request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) ResetAADProfileResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// ResetServicePrincipalProfile updates the service principal profile for a managed cluster.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+// parameters - parameters supplied to the Reset Service Principal Profile operation for a Managed Cluster.
+func (client ManagedClustersClient) ResetServicePrincipalProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterServicePrincipalProfile) (result ManagedClustersResetServicePrincipalProfileFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ResetServicePrincipalProfile")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.ClientID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", err.Error())
+ }
+
+ req, err := client.ResetServicePrincipalProfilePreparer(ctx, resourceGroupName, resourceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.ResetServicePrincipalProfileSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// ResetServicePrincipalProfilePreparer prepares the ResetServicePrincipalProfile request.
+func (client ManagedClustersClient) ResetServicePrincipalProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterServicePrincipalProfile) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ResetServicePrincipalProfileSender sends the ResetServicePrincipalProfile request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) ResetServicePrincipalProfileSender(req *http.Request) (future ManagedClustersResetServicePrincipalProfileFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// ResetServicePrincipalProfileResponder handles the response to the ResetServicePrincipalProfile request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) ResetServicePrincipalProfileResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// UpdateTags updates a managed cluster with the specified tags.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the managed cluster resource.
+// parameters - parameters supplied to the Update Managed Cluster Tags operation.
+func (client ManagedClustersClient) UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (result ManagedClustersUpdateTagsFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.UpdateTags")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: resourceName,
+ Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.ManagedClustersClient", "UpdateTags", err.Error())
+ }
+
+ req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, resourceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "UpdateTags", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.UpdateTagsSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "UpdateTags", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// UpdateTagsPreparer prepares the UpdateTags request.
+func (client ManagedClustersClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateTagsSender sends the UpdateTags request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedClustersClient) UpdateTagsSender(req *http.Request) (future ManagedClustersUpdateTagsFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// UpdateTagsResponder handles the response to the UpdateTags request. The method always
+// closes the http.Response Body.
+func (client ManagedClustersClient) UpdateTagsResponder(resp *http.Response) (result ManagedCluster, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
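
For context, a minimal sketch of how the generated ManagedClustersClient above might be consumed. This is illustrative only and not part of the generated output: the subscription ID, resource group, and cluster name are placeholders, and environment-based authentication via go-autorest's auth package is an assumption.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// The generated constructor takes the subscription ID; an authorizer is
	// attached afterwards (here from environment variables, an assumption).
	client := containerservice.NewManagedClustersClient("<subscription-id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		fmt.Println(err)
		return
	}
	client.Authorizer = authorizer

	ctx := context.Background()

	// Get returns a single managed cluster (Name is a *string on the result).
	mc, err := client.Get(ctx, "my-resource-group", "my-aks-cluster")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*mc.Name)

	// ListComplete returns an iterator that crosses page boundaries on demand.
	iter, err := client.ListComplete(ctx)
	if err != nil {
		fmt.Println(err)
		return
	}
	for iter.NotDone() {
		fmt.Println(*iter.Value().Name)
		if err := iter.NextWithContext(ctx); err != nil {
			fmt.Println(err)
			return
		}
	}

	// Delete is a long-running operation: the client returns a future that is
	// polled to completion using the embedded autorest.Client.
	delFuture, err := client.Delete(ctx, "my-resource-group", "my-aks-cluster")
	if err != nil {
		fmt.Println(err)
		return
	}
	if err := delFuture.WaitForCompletionRef(ctx, client.Client); err != nil {
		fmt.Println(err)
		return
	}
}
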
diff --git a/services/containerservice/mgmt/2019-08-01/containerservice/models.go b/services/containerservice/mgmt/2019-08-01/containerservice/models.go
new file mode 100644
index 000000000000..8e06f1fad5c4
--- /dev/null
+++ b/services/containerservice/mgmt/2019-08-01/containerservice/models.go
@@ -0,0 +1,3297 @@
+package containerservice
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
+
+// AgentPoolType enumerates the values for agent pool type.
+type AgentPoolType string
+
+const (
+ // AvailabilitySet ...
+ AvailabilitySet AgentPoolType = "AvailabilitySet"
+ // VirtualMachineScaleSets ...
+ VirtualMachineScaleSets AgentPoolType = "VirtualMachineScaleSets"
+)
+
+// PossibleAgentPoolTypeValues returns an array of possible values for the AgentPoolType const type.
+func PossibleAgentPoolTypeValues() []AgentPoolType {
+ return []AgentPoolType{AvailabilitySet, VirtualMachineScaleSets}
+}
+
+// Kind enumerates the values for kind.
+type Kind string
+
+const (
+ // KindAADIdentityProvider ...
+ KindAADIdentityProvider Kind = "AADIdentityProvider"
+ // KindOpenShiftManagedClusterBaseIdentityProvider ...
+ KindOpenShiftManagedClusterBaseIdentityProvider Kind = "OpenShiftManagedClusterBaseIdentityProvider"
+)
+
+// PossibleKindValues returns an array of possible values for the Kind const type.
+func PossibleKindValues() []Kind {
+ return []Kind{KindAADIdentityProvider, KindOpenShiftManagedClusterBaseIdentityProvider}
+}
+
+// LoadBalancerSku enumerates the values for load balancer sku.
+type LoadBalancerSku string
+
+const (
+ // Basic ...
+ Basic LoadBalancerSku = "basic"
+ // Standard ...
+ Standard LoadBalancerSku = "standard"
+)
+
+// PossibleLoadBalancerSkuValues returns an array of possible values for the LoadBalancerSku const type.
+func PossibleLoadBalancerSkuValues() []LoadBalancerSku {
+ return []LoadBalancerSku{Basic, Standard}
+}
+
+// NetworkPlugin enumerates the values for network plugin.
+type NetworkPlugin string
+
+const (
+ // Azure ...
+ Azure NetworkPlugin = "azure"
+ // Kubenet ...
+ Kubenet NetworkPlugin = "kubenet"
+)
+
+// PossibleNetworkPluginValues returns an array of possible values for the NetworkPlugin const type.
+func PossibleNetworkPluginValues() []NetworkPlugin {
+ return []NetworkPlugin{Azure, Kubenet}
+}
+
+// NetworkPolicy enumerates the values for network policy.
+type NetworkPolicy string
+
+const (
+ // NetworkPolicyAzure ...
+ NetworkPolicyAzure NetworkPolicy = "azure"
+ // NetworkPolicyCalico ...
+ NetworkPolicyCalico NetworkPolicy = "calico"
+)
+
+// PossibleNetworkPolicyValues returns an array of possible values for the NetworkPolicy const type.
+func PossibleNetworkPolicyValues() []NetworkPolicy {
+ return []NetworkPolicy{NetworkPolicyAzure, NetworkPolicyCalico}
+}
+
+// OpenShiftAgentPoolProfileRole enumerates the values for open shift agent pool profile role.
+type OpenShiftAgentPoolProfileRole string
+
+const (
+ // Compute ...
+ Compute OpenShiftAgentPoolProfileRole = "compute"
+ // Infra ...
+ Infra OpenShiftAgentPoolProfileRole = "infra"
+)
+
+// PossibleOpenShiftAgentPoolProfileRoleValues returns an array of possible values for the OpenShiftAgentPoolProfileRole const type.
+func PossibleOpenShiftAgentPoolProfileRoleValues() []OpenShiftAgentPoolProfileRole {
+ return []OpenShiftAgentPoolProfileRole{Compute, Infra}
+}
+
+// OpenShiftContainerServiceVMSize enumerates the values for open shift container service vm size.
+type OpenShiftContainerServiceVMSize string
+
+const (
+ // StandardD16sV3 ...
+ StandardD16sV3 OpenShiftContainerServiceVMSize = "Standard_D16s_v3"
+ // StandardD2sV3 ...
+ StandardD2sV3 OpenShiftContainerServiceVMSize = "Standard_D2s_v3"
+ // StandardD32sV3 ...
+ StandardD32sV3 OpenShiftContainerServiceVMSize = "Standard_D32s_v3"
+ // StandardD4sV3 ...
+ StandardD4sV3 OpenShiftContainerServiceVMSize = "Standard_D4s_v3"
+ // StandardD64sV3 ...
+ StandardD64sV3 OpenShiftContainerServiceVMSize = "Standard_D64s_v3"
+ // StandardD8sV3 ...
+ StandardD8sV3 OpenShiftContainerServiceVMSize = "Standard_D8s_v3"
+ // StandardDS12V2 ...
+ StandardDS12V2 OpenShiftContainerServiceVMSize = "Standard_DS12_v2"
+ // StandardDS13V2 ...
+ StandardDS13V2 OpenShiftContainerServiceVMSize = "Standard_DS13_v2"
+ // StandardDS14V2 ...
+ StandardDS14V2 OpenShiftContainerServiceVMSize = "Standard_DS14_v2"
+ // StandardDS15V2 ...
+ StandardDS15V2 OpenShiftContainerServiceVMSize = "Standard_DS15_v2"
+ // StandardDS4V2 ...
+ StandardDS4V2 OpenShiftContainerServiceVMSize = "Standard_DS4_v2"
+ // StandardDS5V2 ...
+ StandardDS5V2 OpenShiftContainerServiceVMSize = "Standard_DS5_v2"
+ // StandardE16sV3 ...
+ StandardE16sV3 OpenShiftContainerServiceVMSize = "Standard_E16s_v3"
+ // StandardE20sV3 ...
+ StandardE20sV3 OpenShiftContainerServiceVMSize = "Standard_E20s_v3"
+ // StandardE32sV3 ...
+ StandardE32sV3 OpenShiftContainerServiceVMSize = "Standard_E32s_v3"
+ // StandardE4sV3 ...
+ StandardE4sV3 OpenShiftContainerServiceVMSize = "Standard_E4s_v3"
+ // StandardE64sV3 ...
+ StandardE64sV3 OpenShiftContainerServiceVMSize = "Standard_E64s_v3"
+ // StandardE8sV3 ...
+ StandardE8sV3 OpenShiftContainerServiceVMSize = "Standard_E8s_v3"
+ // StandardF16s ...
+ StandardF16s OpenShiftContainerServiceVMSize = "Standard_F16s"
+ // StandardF16sV2 ...
+ StandardF16sV2 OpenShiftContainerServiceVMSize = "Standard_F16s_v2"
+ // StandardF32sV2 ...
+ StandardF32sV2 OpenShiftContainerServiceVMSize = "Standard_F32s_v2"
+ // StandardF64sV2 ...
+ StandardF64sV2 OpenShiftContainerServiceVMSize = "Standard_F64s_v2"
+ // StandardF72sV2 ...
+ StandardF72sV2 OpenShiftContainerServiceVMSize = "Standard_F72s_v2"
+ // StandardF8s ...
+ StandardF8s OpenShiftContainerServiceVMSize = "Standard_F8s"
+ // StandardF8sV2 ...
+ StandardF8sV2 OpenShiftContainerServiceVMSize = "Standard_F8s_v2"
+ // StandardGS2 ...
+ StandardGS2 OpenShiftContainerServiceVMSize = "Standard_GS2"
+ // StandardGS3 ...
+ StandardGS3 OpenShiftContainerServiceVMSize = "Standard_GS3"
+ // StandardGS4 ...
+ StandardGS4 OpenShiftContainerServiceVMSize = "Standard_GS4"
+ // StandardGS5 ...
+ StandardGS5 OpenShiftContainerServiceVMSize = "Standard_GS5"
+ // StandardL16s ...
+ StandardL16s OpenShiftContainerServiceVMSize = "Standard_L16s"
+ // StandardL32s ...
+ StandardL32s OpenShiftContainerServiceVMSize = "Standard_L32s"
+ // StandardL4s ...
+ StandardL4s OpenShiftContainerServiceVMSize = "Standard_L4s"
+ // StandardL8s ...
+ StandardL8s OpenShiftContainerServiceVMSize = "Standard_L8s"
+)
+
+// PossibleOpenShiftContainerServiceVMSizeValues returns an array of possible values for the OpenShiftContainerServiceVMSize const type.
+func PossibleOpenShiftContainerServiceVMSizeValues() []OpenShiftContainerServiceVMSize {
+ return []OpenShiftContainerServiceVMSize{StandardD16sV3, StandardD2sV3, StandardD32sV3, StandardD4sV3, StandardD64sV3, StandardD8sV3, StandardDS12V2, StandardDS13V2, StandardDS14V2, StandardDS15V2, StandardDS4V2, StandardDS5V2, StandardE16sV3, StandardE20sV3, StandardE32sV3, StandardE4sV3, StandardE64sV3, StandardE8sV3, StandardF16s, StandardF16sV2, StandardF32sV2, StandardF64sV2, StandardF72sV2, StandardF8s, StandardF8sV2, StandardGS2, StandardGS3, StandardGS4, StandardGS5, StandardL16s, StandardL32s, StandardL4s, StandardL8s}
+}
+
+// OrchestratorTypes enumerates the values for orchestrator types.
+type OrchestratorTypes string
+
+const (
+ // Custom ...
+ Custom OrchestratorTypes = "Custom"
+ // DCOS ...
+ DCOS OrchestratorTypes = "DCOS"
+ // DockerCE ...
+ DockerCE OrchestratorTypes = "DockerCE"
+ // Kubernetes ...
+ Kubernetes OrchestratorTypes = "Kubernetes"
+ // Swarm ...
+ Swarm OrchestratorTypes = "Swarm"
+)
+
+// PossibleOrchestratorTypesValues returns an array of possible values for the OrchestratorTypes const type.
+func PossibleOrchestratorTypesValues() []OrchestratorTypes {
+ return []OrchestratorTypes{Custom, DCOS, DockerCE, Kubernetes, Swarm}
+}
+
+// OSType enumerates the values for os type.
+type OSType string
+
+const (
+ // Linux ...
+ Linux OSType = "Linux"
+ // Windows ...
+ Windows OSType = "Windows"
+)
+
+// PossibleOSTypeValues returns an array of possible values for the OSType const type.
+func PossibleOSTypeValues() []OSType {
+ return []OSType{Linux, Windows}
+}
+
+// ResourceIdentityType enumerates the values for resource identity type.
+type ResourceIdentityType string
+
+const (
+ // None ...
+ None ResourceIdentityType = "None"
+ // SystemAssigned ...
+ SystemAssigned ResourceIdentityType = "SystemAssigned"
+)
+
+// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type.
+func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
+ return []ResourceIdentityType{None, SystemAssigned}
+}
+
+// ScaleSetEvictionPolicy enumerates the values for scale set eviction policy.
+type ScaleSetEvictionPolicy string
+
+const (
+ // Deallocate ...
+ Deallocate ScaleSetEvictionPolicy = "Deallocate"
+ // Delete ...
+ Delete ScaleSetEvictionPolicy = "Delete"
+)
+
+// PossibleScaleSetEvictionPolicyValues returns an array of possible values for the ScaleSetEvictionPolicy const type.
+func PossibleScaleSetEvictionPolicyValues() []ScaleSetEvictionPolicy {
+ return []ScaleSetEvictionPolicy{Deallocate, Delete}
+}
+
+// ScaleSetPriority enumerates the values for scale set priority.
+type ScaleSetPriority string
+
+const (
+ // Low ...
+ Low ScaleSetPriority = "Low"
+ // Regular ...
+ Regular ScaleSetPriority = "Regular"
+)
+
+// PossibleScaleSetPriorityValues returns an array of possible values for the ScaleSetPriority const type.
+func PossibleScaleSetPriorityValues() []ScaleSetPriority {
+ return []ScaleSetPriority{Low, Regular}
+}
+
+// StorageProfileTypes enumerates the values for storage profile types.
+type StorageProfileTypes string
+
+const (
+ // ManagedDisks ...
+ ManagedDisks StorageProfileTypes = "ManagedDisks"
+ // StorageAccount ...
+ StorageAccount StorageProfileTypes = "StorageAccount"
+)
+
+// PossibleStorageProfileTypesValues returns an array of possible values for the StorageProfileTypes const type.
+func PossibleStorageProfileTypesValues() []StorageProfileTypes {
+ return []StorageProfileTypes{ManagedDisks, StorageAccount}
+}
+
+// VMSizeTypes enumerates the values for vm size types.
+type VMSizeTypes string
+
+const (
+ // VMSizeTypesStandardA1 ...
+ VMSizeTypesStandardA1 VMSizeTypes = "Standard_A1"
+ // VMSizeTypesStandardA10 ...
+ VMSizeTypesStandardA10 VMSizeTypes = "Standard_A10"
+ // VMSizeTypesStandardA11 ...
+ VMSizeTypesStandardA11 VMSizeTypes = "Standard_A11"
+ // VMSizeTypesStandardA1V2 ...
+ VMSizeTypesStandardA1V2 VMSizeTypes = "Standard_A1_v2"
+ // VMSizeTypesStandardA2 ...
+ VMSizeTypesStandardA2 VMSizeTypes = "Standard_A2"
+ // VMSizeTypesStandardA2mV2 ...
+ VMSizeTypesStandardA2mV2 VMSizeTypes = "Standard_A2m_v2"
+ // VMSizeTypesStandardA2V2 ...
+ VMSizeTypesStandardA2V2 VMSizeTypes = "Standard_A2_v2"
+ // VMSizeTypesStandardA3 ...
+ VMSizeTypesStandardA3 VMSizeTypes = "Standard_A3"
+ // VMSizeTypesStandardA4 ...
+ VMSizeTypesStandardA4 VMSizeTypes = "Standard_A4"
+ // VMSizeTypesStandardA4mV2 ...
+ VMSizeTypesStandardA4mV2 VMSizeTypes = "Standard_A4m_v2"
+ // VMSizeTypesStandardA4V2 ...
+ VMSizeTypesStandardA4V2 VMSizeTypes = "Standard_A4_v2"
+ // VMSizeTypesStandardA5 ...
+ VMSizeTypesStandardA5 VMSizeTypes = "Standard_A5"
+ // VMSizeTypesStandardA6 ...
+ VMSizeTypesStandardA6 VMSizeTypes = "Standard_A6"
+ // VMSizeTypesStandardA7 ...
+ VMSizeTypesStandardA7 VMSizeTypes = "Standard_A7"
+ // VMSizeTypesStandardA8 ...
+ VMSizeTypesStandardA8 VMSizeTypes = "Standard_A8"
+ // VMSizeTypesStandardA8mV2 ...
+ VMSizeTypesStandardA8mV2 VMSizeTypes = "Standard_A8m_v2"
+ // VMSizeTypesStandardA8V2 ...
+ VMSizeTypesStandardA8V2 VMSizeTypes = "Standard_A8_v2"
+ // VMSizeTypesStandardA9 ...
+ VMSizeTypesStandardA9 VMSizeTypes = "Standard_A9"
+ // VMSizeTypesStandardB2ms ...
+ VMSizeTypesStandardB2ms VMSizeTypes = "Standard_B2ms"
+ // VMSizeTypesStandardB2s ...
+ VMSizeTypesStandardB2s VMSizeTypes = "Standard_B2s"
+ // VMSizeTypesStandardB4ms ...
+ VMSizeTypesStandardB4ms VMSizeTypes = "Standard_B4ms"
+ // VMSizeTypesStandardB8ms ...
+ VMSizeTypesStandardB8ms VMSizeTypes = "Standard_B8ms"
+ // VMSizeTypesStandardD1 ...
+ VMSizeTypesStandardD1 VMSizeTypes = "Standard_D1"
+ // VMSizeTypesStandardD11 ...
+ VMSizeTypesStandardD11 VMSizeTypes = "Standard_D11"
+ // VMSizeTypesStandardD11V2 ...
+ VMSizeTypesStandardD11V2 VMSizeTypes = "Standard_D11_v2"
+ // VMSizeTypesStandardD11V2Promo ...
+ VMSizeTypesStandardD11V2Promo VMSizeTypes = "Standard_D11_v2_Promo"
+ // VMSizeTypesStandardD12 ...
+ VMSizeTypesStandardD12 VMSizeTypes = "Standard_D12"
+ // VMSizeTypesStandardD12V2 ...
+ VMSizeTypesStandardD12V2 VMSizeTypes = "Standard_D12_v2"
+ // VMSizeTypesStandardD12V2Promo ...
+ VMSizeTypesStandardD12V2Promo VMSizeTypes = "Standard_D12_v2_Promo"
+ // VMSizeTypesStandardD13 ...
+ VMSizeTypesStandardD13 VMSizeTypes = "Standard_D13"
+ // VMSizeTypesStandardD13V2 ...
+ VMSizeTypesStandardD13V2 VMSizeTypes = "Standard_D13_v2"
+ // VMSizeTypesStandardD13V2Promo ...
+ VMSizeTypesStandardD13V2Promo VMSizeTypes = "Standard_D13_v2_Promo"
+ // VMSizeTypesStandardD14 ...
+ VMSizeTypesStandardD14 VMSizeTypes = "Standard_D14"
+ // VMSizeTypesStandardD14V2 ...
+ VMSizeTypesStandardD14V2 VMSizeTypes = "Standard_D14_v2"
+ // VMSizeTypesStandardD14V2Promo ...
+ VMSizeTypesStandardD14V2Promo VMSizeTypes = "Standard_D14_v2_Promo"
+ // VMSizeTypesStandardD15V2 ...
+ VMSizeTypesStandardD15V2 VMSizeTypes = "Standard_D15_v2"
+ // VMSizeTypesStandardD16sV3 ...
+ VMSizeTypesStandardD16sV3 VMSizeTypes = "Standard_D16s_v3"
+ // VMSizeTypesStandardD16V3 ...
+ VMSizeTypesStandardD16V3 VMSizeTypes = "Standard_D16_v3"
+ // VMSizeTypesStandardD1V2 ...
+ VMSizeTypesStandardD1V2 VMSizeTypes = "Standard_D1_v2"
+ // VMSizeTypesStandardD2 ...
+ VMSizeTypesStandardD2 VMSizeTypes = "Standard_D2"
+ // VMSizeTypesStandardD2sV3 ...
+ VMSizeTypesStandardD2sV3 VMSizeTypes = "Standard_D2s_v3"
+ // VMSizeTypesStandardD2V2 ...
+ VMSizeTypesStandardD2V2 VMSizeTypes = "Standard_D2_v2"
+ // VMSizeTypesStandardD2V2Promo ...
+ VMSizeTypesStandardD2V2Promo VMSizeTypes = "Standard_D2_v2_Promo"
+ // VMSizeTypesStandardD2V3 ...
+ VMSizeTypesStandardD2V3 VMSizeTypes = "Standard_D2_v3"
+ // VMSizeTypesStandardD3 ...
+ VMSizeTypesStandardD3 VMSizeTypes = "Standard_D3"
+ // VMSizeTypesStandardD32sV3 ...
+ VMSizeTypesStandardD32sV3 VMSizeTypes = "Standard_D32s_v3"
+ // VMSizeTypesStandardD32V3 ...
+ VMSizeTypesStandardD32V3 VMSizeTypes = "Standard_D32_v3"
+ // VMSizeTypesStandardD3V2 ...
+ VMSizeTypesStandardD3V2 VMSizeTypes = "Standard_D3_v2"
+ // VMSizeTypesStandardD3V2Promo ...
+ VMSizeTypesStandardD3V2Promo VMSizeTypes = "Standard_D3_v2_Promo"
+ // VMSizeTypesStandardD4 ...
+ VMSizeTypesStandardD4 VMSizeTypes = "Standard_D4"
+ // VMSizeTypesStandardD4sV3 ...
+ VMSizeTypesStandardD4sV3 VMSizeTypes = "Standard_D4s_v3"
+ // VMSizeTypesStandardD4V2 ...
+ VMSizeTypesStandardD4V2 VMSizeTypes = "Standard_D4_v2"
+ // VMSizeTypesStandardD4V2Promo ...
+ VMSizeTypesStandardD4V2Promo VMSizeTypes = "Standard_D4_v2_Promo"
+ // VMSizeTypesStandardD4V3 ...
+ VMSizeTypesStandardD4V3 VMSizeTypes = "Standard_D4_v3"
+ // VMSizeTypesStandardD5V2 ...
+ VMSizeTypesStandardD5V2 VMSizeTypes = "Standard_D5_v2"
+ // VMSizeTypesStandardD5V2Promo ...
+ VMSizeTypesStandardD5V2Promo VMSizeTypes = "Standard_D5_v2_Promo"
+ // VMSizeTypesStandardD64sV3 ...
+ VMSizeTypesStandardD64sV3 VMSizeTypes = "Standard_D64s_v3"
+ // VMSizeTypesStandardD64V3 ...
+ VMSizeTypesStandardD64V3 VMSizeTypes = "Standard_D64_v3"
+ // VMSizeTypesStandardD8sV3 ...
+ VMSizeTypesStandardD8sV3 VMSizeTypes = "Standard_D8s_v3"
+ // VMSizeTypesStandardD8V3 ...
+ VMSizeTypesStandardD8V3 VMSizeTypes = "Standard_D8_v3"
+ // VMSizeTypesStandardDS1 ...
+ VMSizeTypesStandardDS1 VMSizeTypes = "Standard_DS1"
+ // VMSizeTypesStandardDS11 ...
+ VMSizeTypesStandardDS11 VMSizeTypes = "Standard_DS11"
+ // VMSizeTypesStandardDS11V2 ...
+ VMSizeTypesStandardDS11V2 VMSizeTypes = "Standard_DS11_v2"
+ // VMSizeTypesStandardDS11V2Promo ...
+ VMSizeTypesStandardDS11V2Promo VMSizeTypes = "Standard_DS11_v2_Promo"
+ // VMSizeTypesStandardDS12 ...
+ VMSizeTypesStandardDS12 VMSizeTypes = "Standard_DS12"
+ // VMSizeTypesStandardDS12V2 ...
+ VMSizeTypesStandardDS12V2 VMSizeTypes = "Standard_DS12_v2"
+ // VMSizeTypesStandardDS12V2Promo ...
+ VMSizeTypesStandardDS12V2Promo VMSizeTypes = "Standard_DS12_v2_Promo"
+ // VMSizeTypesStandardDS13 ...
+ VMSizeTypesStandardDS13 VMSizeTypes = "Standard_DS13"
+ // VMSizeTypesStandardDS132V2 ...
+ VMSizeTypesStandardDS132V2 VMSizeTypes = "Standard_DS13-2_v2"
+ // VMSizeTypesStandardDS134V2 ...
+ VMSizeTypesStandardDS134V2 VMSizeTypes = "Standard_DS13-4_v2"
+ // VMSizeTypesStandardDS13V2 ...
+ VMSizeTypesStandardDS13V2 VMSizeTypes = "Standard_DS13_v2"
+ // VMSizeTypesStandardDS13V2Promo ...
+ VMSizeTypesStandardDS13V2Promo VMSizeTypes = "Standard_DS13_v2_Promo"
+ // VMSizeTypesStandardDS14 ...
+ VMSizeTypesStandardDS14 VMSizeTypes = "Standard_DS14"
+ // VMSizeTypesStandardDS144V2 ...
+ VMSizeTypesStandardDS144V2 VMSizeTypes = "Standard_DS14-4_v2"
+ // VMSizeTypesStandardDS148V2 ...
+ VMSizeTypesStandardDS148V2 VMSizeTypes = "Standard_DS14-8_v2"
+ // VMSizeTypesStandardDS14V2 ...
+ VMSizeTypesStandardDS14V2 VMSizeTypes = "Standard_DS14_v2"
+ // VMSizeTypesStandardDS14V2Promo ...
+ VMSizeTypesStandardDS14V2Promo VMSizeTypes = "Standard_DS14_v2_Promo"
+ // VMSizeTypesStandardDS15V2 ...
+ VMSizeTypesStandardDS15V2 VMSizeTypes = "Standard_DS15_v2"
+ // VMSizeTypesStandardDS1V2 ...
+ VMSizeTypesStandardDS1V2 VMSizeTypes = "Standard_DS1_v2"
+ // VMSizeTypesStandardDS2 ...
+ VMSizeTypesStandardDS2 VMSizeTypes = "Standard_DS2"
+ // VMSizeTypesStandardDS2V2 ...
+ VMSizeTypesStandardDS2V2 VMSizeTypes = "Standard_DS2_v2"
+ // VMSizeTypesStandardDS2V2Promo ...
+ VMSizeTypesStandardDS2V2Promo VMSizeTypes = "Standard_DS2_v2_Promo"
+ // VMSizeTypesStandardDS3 ...
+ VMSizeTypesStandardDS3 VMSizeTypes = "Standard_DS3"
+ // VMSizeTypesStandardDS3V2 ...
+ VMSizeTypesStandardDS3V2 VMSizeTypes = "Standard_DS3_v2"
+ // VMSizeTypesStandardDS3V2Promo ...
+ VMSizeTypesStandardDS3V2Promo VMSizeTypes = "Standard_DS3_v2_Promo"
+ // VMSizeTypesStandardDS4 ...
+ VMSizeTypesStandardDS4 VMSizeTypes = "Standard_DS4"
+ // VMSizeTypesStandardDS4V2 ...
+ VMSizeTypesStandardDS4V2 VMSizeTypes = "Standard_DS4_v2"
+ // VMSizeTypesStandardDS4V2Promo ...
+ VMSizeTypesStandardDS4V2Promo VMSizeTypes = "Standard_DS4_v2_Promo"
+ // VMSizeTypesStandardDS5V2 ...
+ VMSizeTypesStandardDS5V2 VMSizeTypes = "Standard_DS5_v2"
+ // VMSizeTypesStandardDS5V2Promo ...
+ VMSizeTypesStandardDS5V2Promo VMSizeTypes = "Standard_DS5_v2_Promo"
+ // VMSizeTypesStandardE16sV3 ...
+ VMSizeTypesStandardE16sV3 VMSizeTypes = "Standard_E16s_v3"
+ // VMSizeTypesStandardE16V3 ...
+ VMSizeTypesStandardE16V3 VMSizeTypes = "Standard_E16_v3"
+ // VMSizeTypesStandardE2sV3 ...
+ VMSizeTypesStandardE2sV3 VMSizeTypes = "Standard_E2s_v3"
+ // VMSizeTypesStandardE2V3 ...
+ VMSizeTypesStandardE2V3 VMSizeTypes = "Standard_E2_v3"
+ // VMSizeTypesStandardE3216sV3 ...
+ VMSizeTypesStandardE3216sV3 VMSizeTypes = "Standard_E32-16s_v3"
+ // VMSizeTypesStandardE328sV3 ...
+ VMSizeTypesStandardE328sV3 VMSizeTypes = "Standard_E32-8s_v3"
+ // VMSizeTypesStandardE32sV3 ...
+ VMSizeTypesStandardE32sV3 VMSizeTypes = "Standard_E32s_v3"
+ // VMSizeTypesStandardE32V3 ...
+ VMSizeTypesStandardE32V3 VMSizeTypes = "Standard_E32_v3"
+ // VMSizeTypesStandardE4sV3 ...
+ VMSizeTypesStandardE4sV3 VMSizeTypes = "Standard_E4s_v3"
+ // VMSizeTypesStandardE4V3 ...
+ VMSizeTypesStandardE4V3 VMSizeTypes = "Standard_E4_v3"
+ // VMSizeTypesStandardE6416sV3 ...
+ VMSizeTypesStandardE6416sV3 VMSizeTypes = "Standard_E64-16s_v3"
+ // VMSizeTypesStandardE6432sV3 ...
+ VMSizeTypesStandardE6432sV3 VMSizeTypes = "Standard_E64-32s_v3"
+ // VMSizeTypesStandardE64sV3 ...
+ VMSizeTypesStandardE64sV3 VMSizeTypes = "Standard_E64s_v3"
+ // VMSizeTypesStandardE64V3 ...
+ VMSizeTypesStandardE64V3 VMSizeTypes = "Standard_E64_v3"
+ // VMSizeTypesStandardE8sV3 ...
+ VMSizeTypesStandardE8sV3 VMSizeTypes = "Standard_E8s_v3"
+ // VMSizeTypesStandardE8V3 ...
+ VMSizeTypesStandardE8V3 VMSizeTypes = "Standard_E8_v3"
+ // VMSizeTypesStandardF1 ...
+ VMSizeTypesStandardF1 VMSizeTypes = "Standard_F1"
+ // VMSizeTypesStandardF16 ...
+ VMSizeTypesStandardF16 VMSizeTypes = "Standard_F16"
+ // VMSizeTypesStandardF16s ...
+ VMSizeTypesStandardF16s VMSizeTypes = "Standard_F16s"
+ // VMSizeTypesStandardF16sV2 ...
+ VMSizeTypesStandardF16sV2 VMSizeTypes = "Standard_F16s_v2"
+ // VMSizeTypesStandardF1s ...
+ VMSizeTypesStandardF1s VMSizeTypes = "Standard_F1s"
+ // VMSizeTypesStandardF2 ...
+ VMSizeTypesStandardF2 VMSizeTypes = "Standard_F2"
+ // VMSizeTypesStandardF2s ...
+ VMSizeTypesStandardF2s VMSizeTypes = "Standard_F2s"
+ // VMSizeTypesStandardF2sV2 ...
+ VMSizeTypesStandardF2sV2 VMSizeTypes = "Standard_F2s_v2"
+ // VMSizeTypesStandardF32sV2 ...
+ VMSizeTypesStandardF32sV2 VMSizeTypes = "Standard_F32s_v2"
+ // VMSizeTypesStandardF4 ...
+ VMSizeTypesStandardF4 VMSizeTypes = "Standard_F4"
+ // VMSizeTypesStandardF4s ...
+ VMSizeTypesStandardF4s VMSizeTypes = "Standard_F4s"
+ // VMSizeTypesStandardF4sV2 ...
+ VMSizeTypesStandardF4sV2 VMSizeTypes = "Standard_F4s_v2"
+ // VMSizeTypesStandardF64sV2 ...
+ VMSizeTypesStandardF64sV2 VMSizeTypes = "Standard_F64s_v2"
+ // VMSizeTypesStandardF72sV2 ...
+ VMSizeTypesStandardF72sV2 VMSizeTypes = "Standard_F72s_v2"
+ // VMSizeTypesStandardF8 ...
+ VMSizeTypesStandardF8 VMSizeTypes = "Standard_F8"
+ // VMSizeTypesStandardF8s ...
+ VMSizeTypesStandardF8s VMSizeTypes = "Standard_F8s"
+ // VMSizeTypesStandardF8sV2 ...
+ VMSizeTypesStandardF8sV2 VMSizeTypes = "Standard_F8s_v2"
+ // VMSizeTypesStandardG1 ...
+ VMSizeTypesStandardG1 VMSizeTypes = "Standard_G1"
+ // VMSizeTypesStandardG2 ...
+ VMSizeTypesStandardG2 VMSizeTypes = "Standard_G2"
+ // VMSizeTypesStandardG3 ...
+ VMSizeTypesStandardG3 VMSizeTypes = "Standard_G3"
+ // VMSizeTypesStandardG4 ...
+ VMSizeTypesStandardG4 VMSizeTypes = "Standard_G4"
+ // VMSizeTypesStandardG5 ...
+ VMSizeTypesStandardG5 VMSizeTypes = "Standard_G5"
+ // VMSizeTypesStandardGS1 ...
+ VMSizeTypesStandardGS1 VMSizeTypes = "Standard_GS1"
+ // VMSizeTypesStandardGS2 ...
+ VMSizeTypesStandardGS2 VMSizeTypes = "Standard_GS2"
+ // VMSizeTypesStandardGS3 ...
+ VMSizeTypesStandardGS3 VMSizeTypes = "Standard_GS3"
+ // VMSizeTypesStandardGS4 ...
+ VMSizeTypesStandardGS4 VMSizeTypes = "Standard_GS4"
+ // VMSizeTypesStandardGS44 ...
+ VMSizeTypesStandardGS44 VMSizeTypes = "Standard_GS4-4"
+ // VMSizeTypesStandardGS48 ...
+ VMSizeTypesStandardGS48 VMSizeTypes = "Standard_GS4-8"
+ // VMSizeTypesStandardGS5 ...
+ VMSizeTypesStandardGS5 VMSizeTypes = "Standard_GS5"
+ // VMSizeTypesStandardGS516 ...
+ VMSizeTypesStandardGS516 VMSizeTypes = "Standard_GS5-16"
+ // VMSizeTypesStandardGS58 ...
+ VMSizeTypesStandardGS58 VMSizeTypes = "Standard_GS5-8"
+ // VMSizeTypesStandardH16 ...
+ VMSizeTypesStandardH16 VMSizeTypes = "Standard_H16"
+ // VMSizeTypesStandardH16m ...
+ VMSizeTypesStandardH16m VMSizeTypes = "Standard_H16m"
+ // VMSizeTypesStandardH16mr ...
+ VMSizeTypesStandardH16mr VMSizeTypes = "Standard_H16mr"
+ // VMSizeTypesStandardH16r ...
+ VMSizeTypesStandardH16r VMSizeTypes = "Standard_H16r"
+ // VMSizeTypesStandardH8 ...
+ VMSizeTypesStandardH8 VMSizeTypes = "Standard_H8"
+ // VMSizeTypesStandardH8m ...
+ VMSizeTypesStandardH8m VMSizeTypes = "Standard_H8m"
+ // VMSizeTypesStandardL16s ...
+ VMSizeTypesStandardL16s VMSizeTypes = "Standard_L16s"
+ // VMSizeTypesStandardL32s ...
+ VMSizeTypesStandardL32s VMSizeTypes = "Standard_L32s"
+ // VMSizeTypesStandardL4s ...
+ VMSizeTypesStandardL4s VMSizeTypes = "Standard_L4s"
+ // VMSizeTypesStandardL8s ...
+ VMSizeTypesStandardL8s VMSizeTypes = "Standard_L8s"
+ // VMSizeTypesStandardM12832ms ...
+ VMSizeTypesStandardM12832ms VMSizeTypes = "Standard_M128-32ms"
+ // VMSizeTypesStandardM12864ms ...
+ VMSizeTypesStandardM12864ms VMSizeTypes = "Standard_M128-64ms"
+ // VMSizeTypesStandardM128ms ...
+ VMSizeTypesStandardM128ms VMSizeTypes = "Standard_M128ms"
+ // VMSizeTypesStandardM128s ...
+ VMSizeTypesStandardM128s VMSizeTypes = "Standard_M128s"
+ // VMSizeTypesStandardM6416ms ...
+ VMSizeTypesStandardM6416ms VMSizeTypes = "Standard_M64-16ms"
+ // VMSizeTypesStandardM6432ms ...
+ VMSizeTypesStandardM6432ms VMSizeTypes = "Standard_M64-32ms"
+ // VMSizeTypesStandardM64ms ...
+ VMSizeTypesStandardM64ms VMSizeTypes = "Standard_M64ms"
+ // VMSizeTypesStandardM64s ...
+ VMSizeTypesStandardM64s VMSizeTypes = "Standard_M64s"
+ // VMSizeTypesStandardNC12 ...
+ VMSizeTypesStandardNC12 VMSizeTypes = "Standard_NC12"
+ // VMSizeTypesStandardNC12sV2 ...
+ VMSizeTypesStandardNC12sV2 VMSizeTypes = "Standard_NC12s_v2"
+ // VMSizeTypesStandardNC12sV3 ...
+ VMSizeTypesStandardNC12sV3 VMSizeTypes = "Standard_NC12s_v3"
+ // VMSizeTypesStandardNC24 ...
+ VMSizeTypesStandardNC24 VMSizeTypes = "Standard_NC24"
+ // VMSizeTypesStandardNC24r ...
+ VMSizeTypesStandardNC24r VMSizeTypes = "Standard_NC24r"
+ // VMSizeTypesStandardNC24rsV2 ...
+ VMSizeTypesStandardNC24rsV2 VMSizeTypes = "Standard_NC24rs_v2"
+ // VMSizeTypesStandardNC24rsV3 ...
+ VMSizeTypesStandardNC24rsV3 VMSizeTypes = "Standard_NC24rs_v3"
+ // VMSizeTypesStandardNC24sV2 ...
+ VMSizeTypesStandardNC24sV2 VMSizeTypes = "Standard_NC24s_v2"
+ // VMSizeTypesStandardNC24sV3 ...
+ VMSizeTypesStandardNC24sV3 VMSizeTypes = "Standard_NC24s_v3"
+ // VMSizeTypesStandardNC6 ...
+ VMSizeTypesStandardNC6 VMSizeTypes = "Standard_NC6"
+ // VMSizeTypesStandardNC6sV2 ...
+ VMSizeTypesStandardNC6sV2 VMSizeTypes = "Standard_NC6s_v2"
+ // VMSizeTypesStandardNC6sV3 ...
+ VMSizeTypesStandardNC6sV3 VMSizeTypes = "Standard_NC6s_v3"
+ // VMSizeTypesStandardND12s ...
+ VMSizeTypesStandardND12s VMSizeTypes = "Standard_ND12s"
+ // VMSizeTypesStandardND24rs ...
+ VMSizeTypesStandardND24rs VMSizeTypes = "Standard_ND24rs"
+ // VMSizeTypesStandardND24s ...
+ VMSizeTypesStandardND24s VMSizeTypes = "Standard_ND24s"
+ // VMSizeTypesStandardND6s ...
+ VMSizeTypesStandardND6s VMSizeTypes = "Standard_ND6s"
+ // VMSizeTypesStandardNV12 ...
+ VMSizeTypesStandardNV12 VMSizeTypes = "Standard_NV12"
+ // VMSizeTypesStandardNV24 ...
+ VMSizeTypesStandardNV24 VMSizeTypes = "Standard_NV24"
+ // VMSizeTypesStandardNV6 ...
+ VMSizeTypesStandardNV6 VMSizeTypes = "Standard_NV6"
+)
+
+// PossibleVMSizeTypesValues returns an array of possible values for the VMSizeTypes const type.
+func PossibleVMSizeTypesValues() []VMSizeTypes {
+ return []VMSizeTypes{VMSizeTypesStandardA1, VMSizeTypesStandardA10, VMSizeTypesStandardA11, VMSizeTypesStandardA1V2, VMSizeTypesStandardA2, VMSizeTypesStandardA2mV2, VMSizeTypesStandardA2V2, VMSizeTypesStandardA3, VMSizeTypesStandardA4, VMSizeTypesStandardA4mV2, VMSizeTypesStandardA4V2, VMSizeTypesStandardA5, VMSizeTypesStandardA6, VMSizeTypesStandardA7, VMSizeTypesStandardA8, VMSizeTypesStandardA8mV2, VMSizeTypesStandardA8V2, VMSizeTypesStandardA9, VMSizeTypesStandardB2ms, VMSizeTypesStandardB2s, VMSizeTypesStandardB4ms, VMSizeTypesStandardB8ms, VMSizeTypesStandardD1, VMSizeTypesStandardD11, VMSizeTypesStandardD11V2, VMSizeTypesStandardD11V2Promo, VMSizeTypesStandardD12, VMSizeTypesStandardD12V2, VMSizeTypesStandardD12V2Promo, VMSizeTypesStandardD13, VMSizeTypesStandardD13V2, VMSizeTypesStandardD13V2Promo, VMSizeTypesStandardD14, VMSizeTypesStandardD14V2, VMSizeTypesStandardD14V2Promo, VMSizeTypesStandardD15V2, VMSizeTypesStandardD16sV3, VMSizeTypesStandardD16V3, VMSizeTypesStandardD1V2, VMSizeTypesStandardD2, VMSizeTypesStandardD2sV3, VMSizeTypesStandardD2V2, VMSizeTypesStandardD2V2Promo, VMSizeTypesStandardD2V3, VMSizeTypesStandardD3, VMSizeTypesStandardD32sV3, VMSizeTypesStandardD32V3, VMSizeTypesStandardD3V2, VMSizeTypesStandardD3V2Promo, VMSizeTypesStandardD4, VMSizeTypesStandardD4sV3, VMSizeTypesStandardD4V2, VMSizeTypesStandardD4V2Promo, VMSizeTypesStandardD4V3, VMSizeTypesStandardD5V2, VMSizeTypesStandardD5V2Promo, VMSizeTypesStandardD64sV3, VMSizeTypesStandardD64V3, VMSizeTypesStandardD8sV3, VMSizeTypesStandardD8V3, VMSizeTypesStandardDS1, VMSizeTypesStandardDS11, VMSizeTypesStandardDS11V2, VMSizeTypesStandardDS11V2Promo, VMSizeTypesStandardDS12, VMSizeTypesStandardDS12V2, VMSizeTypesStandardDS12V2Promo, VMSizeTypesStandardDS13, VMSizeTypesStandardDS132V2, VMSizeTypesStandardDS134V2, VMSizeTypesStandardDS13V2, VMSizeTypesStandardDS13V2Promo, VMSizeTypesStandardDS14, VMSizeTypesStandardDS144V2, VMSizeTypesStandardDS148V2, VMSizeTypesStandardDS14V2, VMSizeTypesStandardDS14V2Promo, VMSizeTypesStandardDS15V2, VMSizeTypesStandardDS1V2, VMSizeTypesStandardDS2, VMSizeTypesStandardDS2V2, VMSizeTypesStandardDS2V2Promo, VMSizeTypesStandardDS3, VMSizeTypesStandardDS3V2, VMSizeTypesStandardDS3V2Promo, VMSizeTypesStandardDS4, VMSizeTypesStandardDS4V2, VMSizeTypesStandardDS4V2Promo, VMSizeTypesStandardDS5V2, VMSizeTypesStandardDS5V2Promo, VMSizeTypesStandardE16sV3, VMSizeTypesStandardE16V3, VMSizeTypesStandardE2sV3, VMSizeTypesStandardE2V3, VMSizeTypesStandardE3216sV3, VMSizeTypesStandardE328sV3, VMSizeTypesStandardE32sV3, VMSizeTypesStandardE32V3, VMSizeTypesStandardE4sV3, VMSizeTypesStandardE4V3, VMSizeTypesStandardE6416sV3, VMSizeTypesStandardE6432sV3, VMSizeTypesStandardE64sV3, VMSizeTypesStandardE64V3, VMSizeTypesStandardE8sV3, VMSizeTypesStandardE8V3, VMSizeTypesStandardF1, VMSizeTypesStandardF16, VMSizeTypesStandardF16s, VMSizeTypesStandardF16sV2, VMSizeTypesStandardF1s, VMSizeTypesStandardF2, VMSizeTypesStandardF2s, VMSizeTypesStandardF2sV2, VMSizeTypesStandardF32sV2, VMSizeTypesStandardF4, VMSizeTypesStandardF4s, VMSizeTypesStandardF4sV2, VMSizeTypesStandardF64sV2, VMSizeTypesStandardF72sV2, VMSizeTypesStandardF8, VMSizeTypesStandardF8s, VMSizeTypesStandardF8sV2, VMSizeTypesStandardG1, VMSizeTypesStandardG2, VMSizeTypesStandardG3, VMSizeTypesStandardG4, VMSizeTypesStandardG5, VMSizeTypesStandardGS1, VMSizeTypesStandardGS2, VMSizeTypesStandardGS3, VMSizeTypesStandardGS4, VMSizeTypesStandardGS44, VMSizeTypesStandardGS48, VMSizeTypesStandardGS5, VMSizeTypesStandardGS516, 
+	VMSizeTypesStandardGS58, VMSizeTypesStandardH16, VMSizeTypesStandardH16m, VMSizeTypesStandardH16mr, VMSizeTypesStandardH16r, VMSizeTypesStandardH8, VMSizeTypesStandardH8m, VMSizeTypesStandardL16s, VMSizeTypesStandardL32s, VMSizeTypesStandardL4s, VMSizeTypesStandardL8s, VMSizeTypesStandardM12832ms, VMSizeTypesStandardM12864ms, VMSizeTypesStandardM128ms, VMSizeTypesStandardM128s, VMSizeTypesStandardM6416ms, VMSizeTypesStandardM6432ms, VMSizeTypesStandardM64ms, VMSizeTypesStandardM64s, VMSizeTypesStandardNC12, VMSizeTypesStandardNC12sV2, VMSizeTypesStandardNC12sV3, VMSizeTypesStandardNC24, VMSizeTypesStandardNC24r, VMSizeTypesStandardNC24rsV2, VMSizeTypesStandardNC24rsV3, VMSizeTypesStandardNC24sV2, VMSizeTypesStandardNC24sV3, VMSizeTypesStandardNC6, VMSizeTypesStandardNC6sV2, VMSizeTypesStandardNC6sV3, VMSizeTypesStandardND12s, VMSizeTypesStandardND24rs, VMSizeTypesStandardND24s, VMSizeTypesStandardND6s, VMSizeTypesStandardNV12, VMSizeTypesStandardNV24, VMSizeTypesStandardNV6}
+}
+
+// AccessProfile profile for enabling a user to access a managed cluster.
+type AccessProfile struct {
+ // KubeConfig - Base64-encoded Kubernetes configuration file.
+ KubeConfig *[]byte `json:"kubeConfig,omitempty"`
+}
+
+// AgentPool agent pool.
+type AgentPool struct {
+ autorest.Response `json:"-"`
+ // ManagedClusterAgentPoolProfileProperties - Properties of an agent pool.
+ *ManagedClusterAgentPoolProfileProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource ID.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource that is unique within a resource group. This name can be used to access the resource.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AgentPool.
+func (ap AgentPool) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ap.ManagedClusterAgentPoolProfileProperties != nil {
+ objectMap["properties"] = ap.ManagedClusterAgentPoolProfileProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AgentPool struct.
+func (ap *AgentPool) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var managedClusterAgentPoolProfileProperties ManagedClusterAgentPoolProfileProperties
+ err = json.Unmarshal(*v, &managedClusterAgentPoolProfileProperties)
+ if err != nil {
+ return err
+ }
+ ap.ManagedClusterAgentPoolProfileProperties = &managedClusterAgentPoolProfileProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ ap.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ap.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ap.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
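A minimal sketch of the custom (un)marshalers in action, assuming the same containerservice import path as above: UnmarshalJSON lifts the nested "properties" object onto the embedded ManagedClusterAgentPoolProfileProperties pointer, and MarshalJSON emits only that object, dropping the READ-ONLY id/name/type fields. The resource ID and the "count" property in the payload are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
)

func main() {
	// Illustrative payload; keys inside "properties" that the model does not
	// declare are simply ignored by encoding/json.
	body := []byte(`{
		"id": "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.ContainerService/managedClusters/mc/agentPools/pool1",
		"name": "pool1",
		"type": "Microsoft.ContainerService/managedClusters/agentPools",
		"properties": {"count": 3}
	}`)

	var ap containerservice.AgentPool
	if err := json.Unmarshal(body, &ap); err != nil {
		panic(err)
	}
	fmt.Println(*ap.Name, ap.ManagedClusterAgentPoolProfileProperties != nil) // pool1 true

	// Re-marshaling emits only "properties"; the READ-ONLY fields are omitted.
	out, _ := json.Marshal(ap)
	fmt.Println(string(out))
}
```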
+
+// AgentPoolAvailableVersions the list of available versions for an agent pool.
+type AgentPoolAvailableVersions struct {
+ autorest.Response `json:"-"`
+ // ID - READ-ONLY; Id of the agent pool available versions.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Name of the agent pool available versions.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Type of the agent pool available versions.
+ Type *string `json:"type,omitempty"`
+ // AgentPoolAvailableVersionsProperties - Properties of agent pool available versions.
+ *AgentPoolAvailableVersionsProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AgentPoolAvailableVersions.
+func (apav AgentPoolAvailableVersions) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if apav.AgentPoolAvailableVersionsProperties != nil {
+ objectMap["properties"] = apav.AgentPoolAvailableVersionsProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AgentPoolAvailableVersions struct.
+func (apav *AgentPoolAvailableVersions) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ apav.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ apav.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ apav.Type = &typeVar
+ }
+ case "properties":
+ if v != nil {
+ var agentPoolAvailableVersionsProperties AgentPoolAvailableVersionsProperties
+ err = json.Unmarshal(*v, &agentPoolAvailableVersionsProperties)
+ if err != nil {
+ return err
+ }
+ apav.AgentPoolAvailableVersionsProperties = &agentPoolAvailableVersionsProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// AgentPoolAvailableVersionsProperties the list of available agent pool versions.
+type AgentPoolAvailableVersionsProperties struct {
+ // AgentPoolVersions - List of versions available for agent pool.
+ AgentPoolVersions *[]AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem `json:"agentPoolVersions,omitempty"`
+}
+
+// AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem ...
+type AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem struct {
+ // Default - Whether this version is the default agent pool version.
+ Default *bool `json:"default,omitempty"`
+ // KubernetesVersion - Kubernetes version (major, minor, patch).
+ KubernetesVersion *string `json:"kubernetesVersion,omitempty"`
+ // IsPreview - Whether Kubernetes version is currently in preview.
+ IsPreview *bool `json:"isPreview,omitempty"`
+}
+
+// AgentPoolListResult the response from the List Agent Pools operation.
+type AgentPoolListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of agent pools.
+ Value *[]AgentPool `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URL to get the next set of agent pool results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// AgentPoolListResultIterator provides access to a complete listing of AgentPool values.
+type AgentPoolListResultIterator struct {
+ i int
+ page AgentPoolListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *AgentPoolListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *AgentPoolListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter AgentPoolListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter AgentPoolListResultIterator) Response() AgentPoolListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter AgentPoolListResultIterator) Value() AgentPool {
+ if !iter.page.NotDone() {
+ return AgentPool{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewAgentPoolListResultIterator creates a new instance of the AgentPoolListResultIterator type.
+func NewAgentPoolListResultIterator(page AgentPoolListResultPage) AgentPoolListResultIterator {
+ return AgentPoolListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the AgentPoolListResult contains no values.
+func (aplr AgentPoolListResult) IsEmpty() bool {
+ return aplr.Value == nil || len(*aplr.Value) == 0
+}
+
+// agentPoolListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (aplr AgentPoolListResult) agentPoolListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if aplr.NextLink == nil || len(to.String(aplr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(aplr.NextLink)))
+}
+
+// AgentPoolListResultPage contains a page of AgentPool values.
+type AgentPoolListResultPage struct {
+ fn func(context.Context, AgentPoolListResult) (AgentPoolListResult, error)
+ aplr AgentPoolListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *AgentPoolListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.aplr)
+ if err != nil {
+ return err
+ }
+ page.aplr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AgentPoolListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page AgentPoolListResultPage) NotDone() bool {
+ return !page.aplr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page AgentPoolListResultPage) Response() AgentPoolListResult {
+ return page.aplr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page AgentPoolListResultPage) Values() []AgentPool {
+ if page.aplr.IsEmpty() {
+ return nil
+ }
+ return *page.aplr.Value
+}
+
+// NewAgentPoolListResultPage creates a new instance of the AgentPoolListResultPage type.
+func NewAgentPoolListResultPage(getNextPage func(context.Context, AgentPoolListResult) (AgentPoolListResult, error)) AgentPoolListResultPage {
+ return AgentPoolListResultPage{fn: getNextPage}
+}
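The pager is easiest to see with a local stub in place of a real List call. A minimal sketch, assuming the same import path: NewAgentPoolListResultPage is given a fake getNextPage function, and the results are walked with the usual NotDone/Values/NextWithContext loop.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Two synthetic "pages" stand in for what the service would return; the
	// second, empty result terminates the enumeration.
	pages := []containerservice.AgentPoolListResult{
		{Value: &[]containerservice.AgentPool{{Name: to.StringPtr("pool1")}, {Name: to.StringPtr("pool2")}}},
		{},
	}
	i := 0
	getNext := func(_ context.Context, _ containerservice.AgentPoolListResult) (containerservice.AgentPoolListResult, error) {
		if i >= len(pages) {
			return containerservice.AgentPoolListResult{}, nil
		}
		next := pages[i]
		i++
		return next, nil
	}

	page := containerservice.NewAgentPoolListResultPage(getNext)
	ctx := context.Background()
	if err := page.NextWithContext(ctx); err != nil { // fetch the first page
		panic(err)
	}
	for page.NotDone() {
		for _, ap := range page.Values() {
			fmt.Println(*ap.Name)
		}
		if err := page.NextWithContext(ctx); err != nil {
			panic(err)
		}
	}
}
```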
+
+// AgentPoolProfile profile for the container service agent pool.
+type AgentPoolProfile struct {
+ // Name - Unique name of the agent pool profile in the context of the subscription and resource group.
+ Name *string `json:"name,omitempty"`
+ // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
+ Count *int32 `json:"count,omitempty"`
+	// VMSize - Size of agent VMs. Possible values include: 'VMSizeTypesStandardA1', 'VMSizeTypesStandardA10', 'VMSizeTypesStandardA11', 'VMSizeTypesStandardA1V2', 'VMSizeTypesStandardA2', 'VMSizeTypesStandardA2V2', 'VMSizeTypesStandardA2mV2', 'VMSizeTypesStandardA3', 'VMSizeTypesStandardA4', 'VMSizeTypesStandardA4V2', 'VMSizeTypesStandardA4mV2', 'VMSizeTypesStandardA5', 'VMSizeTypesStandardA6', 'VMSizeTypesStandardA7', 'VMSizeTypesStandardA8', 'VMSizeTypesStandardA8V2', 'VMSizeTypesStandardA8mV2', 'VMSizeTypesStandardA9', 'VMSizeTypesStandardB2ms', 'VMSizeTypesStandardB2s', 'VMSizeTypesStandardB4ms', 'VMSizeTypesStandardB8ms', 'VMSizeTypesStandardD1', 'VMSizeTypesStandardD11', 'VMSizeTypesStandardD11V2', 'VMSizeTypesStandardD11V2Promo', 'VMSizeTypesStandardD12', 'VMSizeTypesStandardD12V2', 'VMSizeTypesStandardD12V2Promo', 'VMSizeTypesStandardD13', 'VMSizeTypesStandardD13V2', 'VMSizeTypesStandardD13V2Promo', 'VMSizeTypesStandardD14', 'VMSizeTypesStandardD14V2', 'VMSizeTypesStandardD14V2Promo', 'VMSizeTypesStandardD15V2', 'VMSizeTypesStandardD16V3', 'VMSizeTypesStandardD16sV3', 'VMSizeTypesStandardD1V2', 'VMSizeTypesStandardD2', 'VMSizeTypesStandardD2V2', 'VMSizeTypesStandardD2V2Promo', 'VMSizeTypesStandardD2V3', 'VMSizeTypesStandardD2sV3', 'VMSizeTypesStandardD3', 'VMSizeTypesStandardD32V3', 'VMSizeTypesStandardD32sV3', 'VMSizeTypesStandardD3V2', 'VMSizeTypesStandardD3V2Promo', 'VMSizeTypesStandardD4', 'VMSizeTypesStandardD4V2', 'VMSizeTypesStandardD4V2Promo', 'VMSizeTypesStandardD4V3', 'VMSizeTypesStandardD4sV3', 'VMSizeTypesStandardD5V2', 'VMSizeTypesStandardD5V2Promo', 'VMSizeTypesStandardD64V3', 'VMSizeTypesStandardD64sV3', 'VMSizeTypesStandardD8V3', 'VMSizeTypesStandardD8sV3', 'VMSizeTypesStandardDS1', 'VMSizeTypesStandardDS11', 'VMSizeTypesStandardDS11V2', 'VMSizeTypesStandardDS11V2Promo', 'VMSizeTypesStandardDS12', 'VMSizeTypesStandardDS12V2', 'VMSizeTypesStandardDS12V2Promo', 'VMSizeTypesStandardDS13', 'VMSizeTypesStandardDS132V2', 'VMSizeTypesStandardDS134V2', 'VMSizeTypesStandardDS13V2', 'VMSizeTypesStandardDS13V2Promo', 'VMSizeTypesStandardDS14', 'VMSizeTypesStandardDS144V2', 'VMSizeTypesStandardDS148V2', 'VMSizeTypesStandardDS14V2', 'VMSizeTypesStandardDS14V2Promo', 'VMSizeTypesStandardDS15V2', 'VMSizeTypesStandardDS1V2', 'VMSizeTypesStandardDS2', 'VMSizeTypesStandardDS2V2', 'VMSizeTypesStandardDS2V2Promo', 'VMSizeTypesStandardDS3', 'VMSizeTypesStandardDS3V2', 'VMSizeTypesStandardDS3V2Promo', 'VMSizeTypesStandardDS4', 'VMSizeTypesStandardDS4V2', 'VMSizeTypesStandardDS4V2Promo', 'VMSizeTypesStandardDS5V2', 'VMSizeTypesStandardDS5V2Promo', 'VMSizeTypesStandardE16V3', 'VMSizeTypesStandardE16sV3', 'VMSizeTypesStandardE2V3', 'VMSizeTypesStandardE2sV3', 'VMSizeTypesStandardE3216sV3', 'VMSizeTypesStandardE328sV3', 'VMSizeTypesStandardE32V3', 'VMSizeTypesStandardE32sV3', 'VMSizeTypesStandardE4V3', 'VMSizeTypesStandardE4sV3', 'VMSizeTypesStandardE6416sV3', 'VMSizeTypesStandardE6432sV3', 'VMSizeTypesStandardE64V3', 'VMSizeTypesStandardE64sV3', 'VMSizeTypesStandardE8V3', 'VMSizeTypesStandardE8sV3', 'VMSizeTypesStandardF1', 'VMSizeTypesStandardF16', 'VMSizeTypesStandardF16s', 'VMSizeTypesStandardF16sV2', 'VMSizeTypesStandardF1s', 'VMSizeTypesStandardF2', 'VMSizeTypesStandardF2s', 'VMSizeTypesStandardF2sV2', 'VMSizeTypesStandardF32sV2', 'VMSizeTypesStandardF4', 'VMSizeTypesStandardF4s', 'VMSizeTypesStandardF4sV2', 'VMSizeTypesStandardF64sV2', 'VMSizeTypesStandardF72sV2', 'VMSizeTypesStandardF8', 'VMSizeTypesStandardF8s', 'VMSizeTypesStandardF8sV2', 'VMSizeTypesStandardG1',
+	// 'VMSizeTypesStandardG2', 'VMSizeTypesStandardG3', 'VMSizeTypesStandardG4', 'VMSizeTypesStandardG5', 'VMSizeTypesStandardGS1', 'VMSizeTypesStandardGS2', 'VMSizeTypesStandardGS3', 'VMSizeTypesStandardGS4', 'VMSizeTypesStandardGS44', 'VMSizeTypesStandardGS48', 'VMSizeTypesStandardGS5', 'VMSizeTypesStandardGS516', 'VMSizeTypesStandardGS58', 'VMSizeTypesStandardH16', 'VMSizeTypesStandardH16m', 'VMSizeTypesStandardH16mr', 'VMSizeTypesStandardH16r', 'VMSizeTypesStandardH8', 'VMSizeTypesStandardH8m', 'VMSizeTypesStandardL16s', 'VMSizeTypesStandardL32s', 'VMSizeTypesStandardL4s', 'VMSizeTypesStandardL8s', 'VMSizeTypesStandardM12832ms', 'VMSizeTypesStandardM12864ms', 'VMSizeTypesStandardM128ms', 'VMSizeTypesStandardM128s', 'VMSizeTypesStandardM6416ms', 'VMSizeTypesStandardM6432ms', 'VMSizeTypesStandardM64ms', 'VMSizeTypesStandardM64s', 'VMSizeTypesStandardNC12', 'VMSizeTypesStandardNC12sV2', 'VMSizeTypesStandardNC12sV3', 'VMSizeTypesStandardNC24', 'VMSizeTypesStandardNC24r', 'VMSizeTypesStandardNC24rsV2', 'VMSizeTypesStandardNC24rsV3', 'VMSizeTypesStandardNC24sV2', 'VMSizeTypesStandardNC24sV3', 'VMSizeTypesStandardNC6', 'VMSizeTypesStandardNC6sV2', 'VMSizeTypesStandardNC6sV3', 'VMSizeTypesStandardND12s', 'VMSizeTypesStandardND24rs', 'VMSizeTypesStandardND24s', 'VMSizeTypesStandardND6s', 'VMSizeTypesStandardNV12', 'VMSizeTypesStandardNV24', 'VMSizeTypesStandardNV6'
+ VMSize VMSizeTypes `json:"vmSize,omitempty"`
+	// OsDiskSizeGB - OS disk size in GB for every machine in this master/agent pool. If you specify 0, the default osDisk size for the specified vmSize is applied.
+ OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"`
+ // DNSPrefix - DNS prefix to be used to create the FQDN for the agent pool.
+ DNSPrefix *string `json:"dnsPrefix,omitempty"`
+ // Fqdn - READ-ONLY; FQDN for the agent pool.
+ Fqdn *string `json:"fqdn,omitempty"`
+	// Ports - The ports to expose on this agent pool. The default open ports differ based on your choice of orchestrator.
+ Ports *[]int32 `json:"ports,omitempty"`
+	// StorageProfile - Storage profile specifies what kind of storage is used. Choose from StorageAccount and ManagedDisks. If left empty, it is chosen for you based on the orchestrator choice. Possible values include: 'StorageAccount', 'ManagedDisks'
+ StorageProfile StorageProfileTypes `json:"storageProfile,omitempty"`
+ // VnetSubnetID - VNet SubnetID specifies the VNet's subnet identifier.
+ VnetSubnetID *string `json:"vnetSubnetID,omitempty"`
+	// OsType - The operating system type. Choose from Linux and Windows. Defaults to Linux. Possible values include: 'Linux', 'Windows'
+ OsType OSType `json:"osType,omitempty"`
+}
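Populating this pointer-heavy profile typically goes through the autorest/to helpers. A minimal sketch with placeholder values, assuming the same import path as in the earlier examples:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Optional fields are pointers, so values are wrapped with the to helpers.
	profile := containerservice.AgentPoolProfile{
		Name:           to.StringPtr("agentpool1"),
		Count:          to.Int32Ptr(3),
		VMSize:         containerservice.VMSizeTypesStandardDS2V2,
		OsDiskSizeGB:   to.Int32Ptr(0), // 0 selects the default size for the chosen vmSize
		DNSPrefix:      to.StringPtr("myagents"),
		StorageProfile: containerservice.ManagedDisks,
		OsType:         containerservice.Linux,
	}
	fmt.Printf("%s: %d x %s\n", *profile.Name, *profile.Count, profile.VMSize)
}
```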
+
+// AgentPoolsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type AgentPoolsCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *AgentPoolsCreateOrUpdateFuture) Result(client AgentPoolsClient) (ap AgentPool, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.AgentPoolsCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if ap.Response.Response, err = future.GetResult(sender); err == nil && ap.Response.Response.StatusCode != http.StatusNoContent {
+ ap, err = client.CreateOrUpdateResponder(ap.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsCreateOrUpdateFuture", "Result", ap.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
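These futures follow the usual go-autorest pattern: wait for completion, then ask the future for the typed result. A minimal sketch, assuming the generated AgentPoolsClient embeds autorest.Client through the package's BaseClient (defined elsewhere in the package, not shown here) and that the future came from a create-or-update call:

```go
package aksexample // hypothetical helper package, not part of the SDK

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
)

// WaitForAgentPool blocks until the long-running operation behind future has
// finished, then decodes and returns the resulting AgentPool.
func WaitForAgentPool(ctx context.Context, client containerservice.AgentPoolsClient, future containerservice.AgentPoolsCreateOrUpdateFuture) (containerservice.AgentPool, error) {
	// client.Client is assumed to be the embedded autorest.Client used for polling.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return containerservice.AgentPool{}, err
	}
	// Result re-checks completion via DoneWithContext and decodes the final
	// response with CreateOrUpdateResponder; called before completion it would
	// return an async-op-incomplete error rather than block.
	return future.Result(client)
}
```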
+
+// AgentPoolsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type AgentPoolsDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *AgentPoolsDeleteFuture) Result(client AgentPoolsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.AgentPoolsDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// AgentPoolUpgradeProfile the list of available upgrades for an agent pool.
+type AgentPoolUpgradeProfile struct {
+ autorest.Response `json:"-"`
+ // ID - READ-ONLY; Id of the agent pool upgrade profile.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Name of the agent pool upgrade profile.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Type of the agent pool upgrade profile.
+ Type *string `json:"type,omitempty"`
+ // AgentPoolUpgradeProfileProperties - Properties of agent pool upgrade profile.
+ *AgentPoolUpgradeProfileProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AgentPoolUpgradeProfile.
+func (apup AgentPoolUpgradeProfile) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if apup.AgentPoolUpgradeProfileProperties != nil {
+ objectMap["properties"] = apup.AgentPoolUpgradeProfileProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AgentPoolUpgradeProfile struct.
+func (apup *AgentPoolUpgradeProfile) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ apup.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ apup.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ apup.Type = &typeVar
+ }
+ case "properties":
+ if v != nil {
+ var agentPoolUpgradeProfileProperties AgentPoolUpgradeProfileProperties
+ err = json.Unmarshal(*v, &agentPoolUpgradeProfileProperties)
+ if err != nil {
+ return err
+ }
+ apup.AgentPoolUpgradeProfileProperties = &agentPoolUpgradeProfileProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// AgentPoolUpgradeProfileProperties the list of available upgrade versions.
+type AgentPoolUpgradeProfileProperties struct {
+ // KubernetesVersion - Kubernetes version (major, minor, patch).
+ KubernetesVersion *string `json:"kubernetesVersion,omitempty"`
+	// OsType - The operating system type. Choose from Linux and Windows. Defaults to Linux. Possible values include: 'Linux', 'Windows'
+ OsType OSType `json:"osType,omitempty"`
+ // Upgrades - List of orchestrator types and versions available for upgrade.
+ Upgrades *[]AgentPoolUpgradeProfilePropertiesUpgradesItem `json:"upgrades,omitempty"`
+}
+
+// AgentPoolUpgradeProfilePropertiesUpgradesItem ...
+type AgentPoolUpgradeProfilePropertiesUpgradesItem struct {
+ // KubernetesVersion - Kubernetes version (major, minor, patch).
+ KubernetesVersion *string `json:"kubernetesVersion,omitempty"`
+ // IsPreview - Whether Kubernetes version is currently in preview.
+ IsPreview *bool `json:"isPreview,omitempty"`
+}
+
+// CloudError an error response from the Container service.
+type CloudError struct {
+ // Error - Details about the error.
+ Error *CloudErrorBody `json:"error,omitempty"`
+}
+
+// CloudErrorBody an error response from the Container service.
+type CloudErrorBody struct {
+ // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+ Code *string `json:"code,omitempty"`
+ // Message - A message describing the error, intended to be suitable for display in a user interface.
+ Message *string `json:"message,omitempty"`
+ // Target - The target of the particular error. For example, the name of the property in error.
+ Target *string `json:"target,omitempty"`
+ // Details - A list of additional details about the error.
+ Details *[]CloudErrorBody `json:"details,omitempty"`
+}
+
+// ContainerService container service.
+type ContainerService struct {
+ autorest.Response `json:"-"`
+ // Properties - Properties of the container service.
+ *Properties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for ContainerService.
+func (cs ContainerService) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if cs.Properties != nil {
+ objectMap["properties"] = cs.Properties
+ }
+ if cs.Location != nil {
+ objectMap["location"] = cs.Location
+ }
+ if cs.Tags != nil {
+ objectMap["tags"] = cs.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ContainerService struct.
+func (cs *ContainerService) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var properties Properties
+ err = json.Unmarshal(*v, &properties)
+ if err != nil {
+ return err
+ }
+ cs.Properties = &properties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ cs.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ cs.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ cs.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ cs.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ cs.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// ContainerServicesCreateOrUpdateFutureType an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ContainerServicesCreateOrUpdateFutureType struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ContainerServicesCreateOrUpdateFutureType) Result(client ContainerServicesClient) (cs ContainerService, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesCreateOrUpdateFutureType", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.ContainerServicesCreateOrUpdateFutureType")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if cs.Response.Response, err = future.GetResult(sender); err == nil && cs.Response.Response.StatusCode != http.StatusNoContent {
+ cs, err = client.CreateOrUpdateResponder(cs.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesCreateOrUpdateFutureType", "Result", cs.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// ContainerServicesDeleteFutureType an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ContainerServicesDeleteFutureType struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ContainerServicesDeleteFutureType) Result(client ContainerServicesClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ContainerServicesDeleteFutureType", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.ContainerServicesDeleteFutureType")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// CredentialResult the credential result response.
+type CredentialResult struct {
+ // Name - READ-ONLY; The name of the credential.
+ Name *string `json:"name,omitempty"`
+ // Value - READ-ONLY; Base64-encoded Kubernetes configuration file.
+ Value *[]byte `json:"value,omitempty"`
+}
+
+// CredentialResults the list of credential result responses.
+type CredentialResults struct {
+ autorest.Response `json:"-"`
+ // Kubeconfigs - READ-ONLY; Base64-encoded Kubernetes configuration file.
+ Kubeconfigs *[]CredentialResult `json:"kubeconfigs,omitempty"`
+}
+
+// CustomProfile properties to configure a custom container service cluster.
+type CustomProfile struct {
+ // Orchestrator - The name of the custom orchestrator to use.
+ Orchestrator *string `json:"orchestrator,omitempty"`
+}
+
+// DiagnosticsProfile profile for diagnostics on the container service cluster.
+type DiagnosticsProfile struct {
+ // VMDiagnostics - Profile for diagnostics on the container service VMs.
+ VMDiagnostics *VMDiagnostics `json:"vmDiagnostics,omitempty"`
+}
+
+// KeyVaultSecretRef reference to a secret stored in Azure Key Vault.
+type KeyVaultSecretRef struct {
+ // VaultID - Key vault identifier.
+ VaultID *string `json:"vaultID,omitempty"`
+ // SecretName - The secret name.
+ SecretName *string `json:"secretName,omitempty"`
+ // Version - The secret version.
+ Version *string `json:"version,omitempty"`
+}
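A minimal sketch of filling in this reference type, with placeholder vault and secret names and the same assumed import path:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	ref := containerservice.KeyVaultSecretRef{
		// Placeholder vault resource ID and secret name.
		VaultID:    to.StringPtr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.KeyVault/vaults/<vault>"),
		SecretName: to.StringPtr("service-principal-secret"),
		// Version is optional and left unset here.
	}
	fmt.Println(*ref.VaultID, *ref.SecretName)
}
```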
+
+// LinuxProfile profile for Linux VMs in the container service cluster.
+type LinuxProfile struct {
+ // AdminUsername - The administrator username to use for Linux VMs.
+ AdminUsername *string `json:"adminUsername,omitempty"`
+ // SSH - SSH configuration for Linux-based VMs running on Azure.
+ SSH *SSHConfiguration `json:"ssh,omitempty"`
+}
+
+// ListResult the response from the List Container Services operation.
+type ListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of container services.
+ Value *[]ContainerService `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URL to get the next set of container service results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ListResultIterator provides access to a complete listing of ContainerService values.
+type ListResultIterator struct {
+ i int
+ page ListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListResultIterator) Response() ListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListResultIterator) Value() ContainerService {
+ if !iter.page.NotDone() {
+ return ContainerService{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewListResultIterator creates a new instance of the ListResultIterator type.
+func NewListResultIterator(page ListResultPage) ListResultIterator {
+ return ListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (lr ListResult) IsEmpty() bool {
+ return lr.Value == nil || len(*lr.Value) == 0
+}
+
+// listResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lr ListResult) listResultPreparer(ctx context.Context) (*http.Request, error) {
+ if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lr.NextLink)))
+}
+
+// ListResultPage contains a page of ContainerService values.
+type ListResultPage struct {
+ fn func(context.Context, ListResult) (ListResult, error)
+ lr ListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.lr)
+ if err != nil {
+ return err
+ }
+ page.lr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListResultPage) NotDone() bool {
+ return !page.lr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListResultPage) Response() ListResult {
+ return page.lr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListResultPage) Values() []ContainerService {
+ if page.lr.IsEmpty() {
+ return nil
+ }
+ return *page.lr.Value
+}
+
+// NewListResultPage creates a new instance of the ListResultPage type.
+func NewListResultPage(getNextPage func(context.Context, ListResult) (ListResult, error)) ListResultPage {
+ return ListResultPage{fn: getNextPage}
+}
+
+// ManagedCluster managed cluster.
+type ManagedCluster struct {
+ autorest.Response `json:"-"`
+ // ManagedClusterProperties - Properties of a managed cluster.
+ *ManagedClusterProperties `json:"properties,omitempty"`
+ // Identity - The identity of the managed cluster, if configured.
+ Identity *ManagedClusterIdentity `json:"identity,omitempty"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for ManagedCluster.
+func (mc ManagedCluster) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mc.ManagedClusterProperties != nil {
+ objectMap["properties"] = mc.ManagedClusterProperties
+ }
+ if mc.Identity != nil {
+ objectMap["identity"] = mc.Identity
+ }
+ if mc.Location != nil {
+ objectMap["location"] = mc.Location
+ }
+ if mc.Tags != nil {
+ objectMap["tags"] = mc.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ManagedCluster struct.
+func (mc *ManagedCluster) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var managedClusterProperties ManagedClusterProperties
+ err = json.Unmarshal(*v, &managedClusterProperties)
+ if err != nil {
+ return err
+ }
+ mc.ManagedClusterProperties = &managedClusterProperties
+ }
+ case "identity":
+ if v != nil {
+ var identity ManagedClusterIdentity
+ err = json.Unmarshal(*v, &identity)
+ if err != nil {
+ return err
+ }
+ mc.Identity = &identity
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ mc.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mc.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ mc.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ mc.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ mc.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// ManagedClusterAADProfile AADProfile specifies attributes for Azure Active Directory integration.
+type ManagedClusterAADProfile struct {
+ // ClientAppID - The client AAD application ID.
+ ClientAppID *string `json:"clientAppID,omitempty"`
+ // ServerAppID - The server AAD application ID.
+ ServerAppID *string `json:"serverAppID,omitempty"`
+ // ServerAppSecret - The server AAD application secret.
+ ServerAppSecret *string `json:"serverAppSecret,omitempty"`
+	// TenantID - The AAD tenant ID to use for authentication. If not specified, the tenant of the deployment subscription is used.
+ TenantID *string `json:"tenantID,omitempty"`
+}
+
+// ManagedClusterAccessProfile managed cluster Access Profile.
+type ManagedClusterAccessProfile struct {
+ autorest.Response `json:"-"`
+ // AccessProfile - AccessProfile of a managed cluster.
+ *AccessProfile `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for ManagedClusterAccessProfile.
+func (mcap ManagedClusterAccessProfile) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mcap.AccessProfile != nil {
+ objectMap["properties"] = mcap.AccessProfile
+ }
+ if mcap.Location != nil {
+ objectMap["location"] = mcap.Location
+ }
+ if mcap.Tags != nil {
+ objectMap["tags"] = mcap.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ManagedClusterAccessProfile struct.
+func (mcap *ManagedClusterAccessProfile) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var accessProfile AccessProfile
+ err = json.Unmarshal(*v, &accessProfile)
+ if err != nil {
+ return err
+ }
+ mcap.AccessProfile = &accessProfile
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ mcap.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mcap.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ mcap.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ mcap.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ mcap.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// ManagedClusterAddonProfile a Kubernetes add-on profile for a managed cluster.
+type ManagedClusterAddonProfile struct {
+ // Enabled - Whether the add-on is enabled or not.
+ Enabled *bool `json:"enabled,omitempty"`
+ // Config - Key-value pairs for configuring an add-on.
+ Config map[string]*string `json:"config"`
+}
+
+// MarshalJSON is the custom marshaler for ManagedClusterAddonProfile.
+func (mcap ManagedClusterAddonProfile) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mcap.Enabled != nil {
+ objectMap["enabled"] = mcap.Enabled
+ }
+ if mcap.Config != nil {
+ objectMap["config"] = mcap.Config
+ }
+ return json.Marshal(objectMap)
+}
+
+// ManagedClusterAgentPoolProfile profile for the container service agent pool.
+type ManagedClusterAgentPoolProfile struct {
+ // Name - Unique name of the agent pool profile in the context of the subscription and resource group.
+ Name *string `json:"name,omitempty"`
+ // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
+ Count *int32 `json:"count,omitempty"`
+ // VMSize - Size of agent VMs. Possible values include: 'VMSizeTypesStandardA1', 'VMSizeTypesStandardA10', 'VMSizeTypesStandardA11', 'VMSizeTypesStandardA1V2', 'VMSizeTypesStandardA2', 'VMSizeTypesStandardA2V2', 'VMSizeTypesStandardA2mV2', 'VMSizeTypesStandardA3', 'VMSizeTypesStandardA4', 'VMSizeTypesStandardA4V2', 'VMSizeTypesStandardA4mV2', 'VMSizeTypesStandardA5', 'VMSizeTypesStandardA6', 'VMSizeTypesStandardA7', 'VMSizeTypesStandardA8', 'VMSizeTypesStandardA8V2', 'VMSizeTypesStandardA8mV2', 'VMSizeTypesStandardA9', 'VMSizeTypesStandardB2ms', 'VMSizeTypesStandardB2s', 'VMSizeTypesStandardB4ms', 'VMSizeTypesStandardB8ms', 'VMSizeTypesStandardD1', 'VMSizeTypesStandardD11', 'VMSizeTypesStandardD11V2', 'VMSizeTypesStandardD11V2Promo', 'VMSizeTypesStandardD12', 'VMSizeTypesStandardD12V2', 'VMSizeTypesStandardD12V2Promo', 'VMSizeTypesStandardD13', 'VMSizeTypesStandardD13V2', 'VMSizeTypesStandardD13V2Promo', 'VMSizeTypesStandardD14', 'VMSizeTypesStandardD14V2', 'VMSizeTypesStandardD14V2Promo', 'VMSizeTypesStandardD15V2', 'VMSizeTypesStandardD16V3', 'VMSizeTypesStandardD16sV3', 'VMSizeTypesStandardD1V2', 'VMSizeTypesStandardD2', 'VMSizeTypesStandardD2V2', 'VMSizeTypesStandardD2V2Promo', 'VMSizeTypesStandardD2V3', 'VMSizeTypesStandardD2sV3', 'VMSizeTypesStandardD3', 'VMSizeTypesStandardD32V3', 'VMSizeTypesStandardD32sV3', 'VMSizeTypesStandardD3V2', 'VMSizeTypesStandardD3V2Promo', 'VMSizeTypesStandardD4', 'VMSizeTypesStandardD4V2', 'VMSizeTypesStandardD4V2Promo', 'VMSizeTypesStandardD4V3', 'VMSizeTypesStandardD4sV3', 'VMSizeTypesStandardD5V2', 'VMSizeTypesStandardD5V2Promo', 'VMSizeTypesStandardD64V3', 'VMSizeTypesStandardD64sV3', 'VMSizeTypesStandardD8V3', 'VMSizeTypesStandardD8sV3', 'VMSizeTypesStandardDS1', 'VMSizeTypesStandardDS11', 'VMSizeTypesStandardDS11V2', 'VMSizeTypesStandardDS11V2Promo', 'VMSizeTypesStandardDS12', 'VMSizeTypesStandardDS12V2', 'VMSizeTypesStandardDS12V2Promo', 'VMSizeTypesStandardDS13', 'VMSizeTypesStandardDS132V2', 'VMSizeTypesStandardDS134V2', 'VMSizeTypesStandardDS13V2', 'VMSizeTypesStandardDS13V2Promo', 'VMSizeTypesStandardDS14', 'VMSizeTypesStandardDS144V2', 'VMSizeTypesStandardDS148V2', 'VMSizeTypesStandardDS14V2', 'VMSizeTypesStandardDS14V2Promo', 'VMSizeTypesStandardDS15V2', 'VMSizeTypesStandardDS1V2', 'VMSizeTypesStandardDS2', 'VMSizeTypesStandardDS2V2', 'VMSizeTypesStandardDS2V2Promo', 'VMSizeTypesStandardDS3', 'VMSizeTypesStandardDS3V2', 'VMSizeTypesStandardDS3V2Promo', 'VMSizeTypesStandardDS4', 'VMSizeTypesStandardDS4V2', 'VMSizeTypesStandardDS4V2Promo', 'VMSizeTypesStandardDS5V2', 'VMSizeTypesStandardDS5V2Promo', 'VMSizeTypesStandardE16V3', 'VMSizeTypesStandardE16sV3', 'VMSizeTypesStandardE2V3', 'VMSizeTypesStandardE2sV3', 'VMSizeTypesStandardE3216sV3', 'VMSizeTypesStandardE328sV3', 'VMSizeTypesStandardE32V3', 'VMSizeTypesStandardE32sV3', 'VMSizeTypesStandardE4V3', 'VMSizeTypesStandardE4sV3', 'VMSizeTypesStandardE6416sV3', 'VMSizeTypesStandardE6432sV3', 'VMSizeTypesStandardE64V3', 'VMSizeTypesStandardE64sV3', 'VMSizeTypesStandardE8V3', 'VMSizeTypesStandardE8sV3', 'VMSizeTypesStandardF1', 'VMSizeTypesStandardF16', 'VMSizeTypesStandardF16s', 'VMSizeTypesStandardF16sV2', 'VMSizeTypesStandardF1s', 'VMSizeTypesStandardF2', 'VMSizeTypesStandardF2s', 'VMSizeTypesStandardF2sV2', 'VMSizeTypesStandardF32sV2', 'VMSizeTypesStandardF4', 'VMSizeTypesStandardF4s', 'VMSizeTypesStandardF4sV2', 'VMSizeTypesStandardF64sV2', 'VMSizeTypesStandardF72sV2', 'VMSizeTypesStandardF8', 'VMSizeTypesStandardF8s', 'VMSizeTypesStandardF8sV2', 'VMSizeTypesStandardG1', 
+	// 'VMSizeTypesStandardG2', 'VMSizeTypesStandardG3', 'VMSizeTypesStandardG4', 'VMSizeTypesStandardG5', 'VMSizeTypesStandardGS1', 'VMSizeTypesStandardGS2', 'VMSizeTypesStandardGS3', 'VMSizeTypesStandardGS4', 'VMSizeTypesStandardGS44', 'VMSizeTypesStandardGS48', 'VMSizeTypesStandardGS5', 'VMSizeTypesStandardGS516', 'VMSizeTypesStandardGS58', 'VMSizeTypesStandardH16', 'VMSizeTypesStandardH16m', 'VMSizeTypesStandardH16mr', 'VMSizeTypesStandardH16r', 'VMSizeTypesStandardH8', 'VMSizeTypesStandardH8m', 'VMSizeTypesStandardL16s', 'VMSizeTypesStandardL32s', 'VMSizeTypesStandardL4s', 'VMSizeTypesStandardL8s', 'VMSizeTypesStandardM12832ms', 'VMSizeTypesStandardM12864ms', 'VMSizeTypesStandardM128ms', 'VMSizeTypesStandardM128s', 'VMSizeTypesStandardM6416ms', 'VMSizeTypesStandardM6432ms', 'VMSizeTypesStandardM64ms', 'VMSizeTypesStandardM64s', 'VMSizeTypesStandardNC12', 'VMSizeTypesStandardNC12sV2', 'VMSizeTypesStandardNC12sV3', 'VMSizeTypesStandardNC24', 'VMSizeTypesStandardNC24r', 'VMSizeTypesStandardNC24rsV2', 'VMSizeTypesStandardNC24rsV3', 'VMSizeTypesStandardNC24sV2', 'VMSizeTypesStandardNC24sV3', 'VMSizeTypesStandardNC6', 'VMSizeTypesStandardNC6sV2', 'VMSizeTypesStandardNC6sV3', 'VMSizeTypesStandardND12s', 'VMSizeTypesStandardND24rs', 'VMSizeTypesStandardND24s', 'VMSizeTypesStandardND6s', 'VMSizeTypesStandardNV12', 'VMSizeTypesStandardNV24', 'VMSizeTypesStandardNV6'
+ VMSize VMSizeTypes `json:"vmSize,omitempty"`
+ // OsDiskSizeGB - OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
+ OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"`
+ // VnetSubnetID - VNet SubnetID specifies the VNet's subnet identifier.
+ VnetSubnetID *string `json:"vnetSubnetID,omitempty"`
+ // MaxPods - Maximum number of pods that can run on a node.
+ MaxPods *int32 `json:"maxPods,omitempty"`
+ // OsType - OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux. Possible values include: 'Linux', 'Windows'
+ OsType OSType `json:"osType,omitempty"`
+ // MaxCount - Maximum number of nodes for auto-scaling
+ MaxCount *int32 `json:"maxCount,omitempty"`
+ // MinCount - Minimum number of nodes for auto-scaling
+ MinCount *int32 `json:"minCount,omitempty"`
+ // EnableAutoScaling - Whether to enable auto-scaler
+ EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"`
+ // Type - AgentPoolType represents types of an agent pool. Possible values include: 'VirtualMachineScaleSets', 'AvailabilitySet'
+ Type AgentPoolType `json:"type,omitempty"`
+ // OrchestratorVersion - Version of orchestrator specified when creating the managed cluster.
+ OrchestratorVersion *string `json:"orchestratorVersion,omitempty"`
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // AvailabilityZones - (PREVIEW) Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType.
+ AvailabilityZones *[]string `json:"availabilityZones,omitempty"`
+ // EnableNodePublicIP - Enable public IP for nodes
+ EnableNodePublicIP *bool `json:"enableNodePublicIP,omitempty"`
+ // ScaleSetPriority - ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular. Possible values include: 'Low', 'Regular'
+ ScaleSetPriority ScaleSetPriority `json:"scaleSetPriority,omitempty"`
+ // ScaleSetEvictionPolicy - ScaleSetEvictionPolicy to be used to specify eviction policy for low priority virtual machine scale set. Default to Delete. Possible values include: 'Delete', 'Deallocate'
+ ScaleSetEvictionPolicy ScaleSetEvictionPolicy `json:"scaleSetEvictionPolicy,omitempty"`
+ // NodeTaints - Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
+ NodeTaints *[]string `json:"nodeTaints,omitempty"`
+}
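
For illustration only, a sketch of populating this profile with the (PREVIEW) autoscaler fields; exampleAgentPool is a hypothetical helper, the values are placeholders, and the to.* pointer helpers come from github.com/Azure/go-autorest/autorest/to as in the sketch above:

// exampleAgentPool builds a VMSS-backed pool with the autoscaler enabled.
func exampleAgentPool() containerservice.ManagedClusterAgentPoolProfile {
	return containerservice.ManagedClusterAgentPoolProfile{
		Name:              to.StringPtr("nodepool1"), // placeholder name
		Count:             to.Int32Ptr(3),
		VMSize:            containerservice.VMSizeTypesStandardDS2V2,
		OsType:            containerservice.Linux,
		Type:              containerservice.VirtualMachineScaleSets,
		EnableAutoScaling: to.BoolPtr(true),
		MinCount:          to.Int32Ptr(1),
		MaxCount:          to.Int32Ptr(5),
	}
}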
+
+// ManagedClusterAgentPoolProfileProperties properties for the container service agent pool profile.
+type ManagedClusterAgentPoolProfileProperties struct {
+ // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
+ Count *int32 `json:"count,omitempty"`
+ // VMSize - Size of agent VMs. Possible values include: 'VMSizeTypesStandardA1', 'VMSizeTypesStandardA10', 'VMSizeTypesStandardA11', 'VMSizeTypesStandardA1V2', 'VMSizeTypesStandardA2', 'VMSizeTypesStandardA2V2', 'VMSizeTypesStandardA2mV2', 'VMSizeTypesStandardA3', 'VMSizeTypesStandardA4', 'VMSizeTypesStandardA4V2', 'VMSizeTypesStandardA4mV2', 'VMSizeTypesStandardA5', 'VMSizeTypesStandardA6', 'VMSizeTypesStandardA7', 'VMSizeTypesStandardA8', 'VMSizeTypesStandardA8V2', 'VMSizeTypesStandardA8mV2', 'VMSizeTypesStandardA9', 'VMSizeTypesStandardB2ms', 'VMSizeTypesStandardB2s', 'VMSizeTypesStandardB4ms', 'VMSizeTypesStandardB8ms', 'VMSizeTypesStandardD1', 'VMSizeTypesStandardD11', 'VMSizeTypesStandardD11V2', 'VMSizeTypesStandardD11V2Promo', 'VMSizeTypesStandardD12', 'VMSizeTypesStandardD12V2', 'VMSizeTypesStandardD12V2Promo', 'VMSizeTypesStandardD13', 'VMSizeTypesStandardD13V2', 'VMSizeTypesStandardD13V2Promo', 'VMSizeTypesStandardD14', 'VMSizeTypesStandardD14V2', 'VMSizeTypesStandardD14V2Promo', 'VMSizeTypesStandardD15V2', 'VMSizeTypesStandardD16V3', 'VMSizeTypesStandardD16sV3', 'VMSizeTypesStandardD1V2', 'VMSizeTypesStandardD2', 'VMSizeTypesStandardD2V2', 'VMSizeTypesStandardD2V2Promo', 'VMSizeTypesStandardD2V3', 'VMSizeTypesStandardD2sV3', 'VMSizeTypesStandardD3', 'VMSizeTypesStandardD32V3', 'VMSizeTypesStandardD32sV3', 'VMSizeTypesStandardD3V2', 'VMSizeTypesStandardD3V2Promo', 'VMSizeTypesStandardD4', 'VMSizeTypesStandardD4V2', 'VMSizeTypesStandardD4V2Promo', 'VMSizeTypesStandardD4V3', 'VMSizeTypesStandardD4sV3', 'VMSizeTypesStandardD5V2', 'VMSizeTypesStandardD5V2Promo', 'VMSizeTypesStandardD64V3', 'VMSizeTypesStandardD64sV3', 'VMSizeTypesStandardD8V3', 'VMSizeTypesStandardD8sV3', 'VMSizeTypesStandardDS1', 'VMSizeTypesStandardDS11', 'VMSizeTypesStandardDS11V2', 'VMSizeTypesStandardDS11V2Promo', 'VMSizeTypesStandardDS12', 'VMSizeTypesStandardDS12V2', 'VMSizeTypesStandardDS12V2Promo', 'VMSizeTypesStandardDS13', 'VMSizeTypesStandardDS132V2', 'VMSizeTypesStandardDS134V2', 'VMSizeTypesStandardDS13V2', 'VMSizeTypesStandardDS13V2Promo', 'VMSizeTypesStandardDS14', 'VMSizeTypesStandardDS144V2', 'VMSizeTypesStandardDS148V2', 'VMSizeTypesStandardDS14V2', 'VMSizeTypesStandardDS14V2Promo', 'VMSizeTypesStandardDS15V2', 'VMSizeTypesStandardDS1V2', 'VMSizeTypesStandardDS2', 'VMSizeTypesStandardDS2V2', 'VMSizeTypesStandardDS2V2Promo', 'VMSizeTypesStandardDS3', 'VMSizeTypesStandardDS3V2', 'VMSizeTypesStandardDS3V2Promo', 'VMSizeTypesStandardDS4', 'VMSizeTypesStandardDS4V2', 'VMSizeTypesStandardDS4V2Promo', 'VMSizeTypesStandardDS5V2', 'VMSizeTypesStandardDS5V2Promo', 'VMSizeTypesStandardE16V3', 'VMSizeTypesStandardE16sV3', 'VMSizeTypesStandardE2V3', 'VMSizeTypesStandardE2sV3', 'VMSizeTypesStandardE3216sV3', 'VMSizeTypesStandardE328sV3', 'VMSizeTypesStandardE32V3', 'VMSizeTypesStandardE32sV3', 'VMSizeTypesStandardE4V3', 'VMSizeTypesStandardE4sV3', 'VMSizeTypesStandardE6416sV3', 'VMSizeTypesStandardE6432sV3', 'VMSizeTypesStandardE64V3', 'VMSizeTypesStandardE64sV3', 'VMSizeTypesStandardE8V3', 'VMSizeTypesStandardE8sV3', 'VMSizeTypesStandardF1', 'VMSizeTypesStandardF16', 'VMSizeTypesStandardF16s', 'VMSizeTypesStandardF16sV2', 'VMSizeTypesStandardF1s', 'VMSizeTypesStandardF2', 'VMSizeTypesStandardF2s', 'VMSizeTypesStandardF2sV2', 'VMSizeTypesStandardF32sV2', 'VMSizeTypesStandardF4', 'VMSizeTypesStandardF4s', 'VMSizeTypesStandardF4sV2', 'VMSizeTypesStandardF64sV2', 'VMSizeTypesStandardF72sV2', 'VMSizeTypesStandardF8', 'VMSizeTypesStandardF8s', 'VMSizeTypesStandardF8sV2', 'VMSizeTypesStandardG1', 
+	// 'VMSizeTypesStandardG2', 'VMSizeTypesStandardG3', 'VMSizeTypesStandardG4', 'VMSizeTypesStandardG5', 'VMSizeTypesStandardGS1', 'VMSizeTypesStandardGS2', 'VMSizeTypesStandardGS3', 'VMSizeTypesStandardGS4', 'VMSizeTypesStandardGS44', 'VMSizeTypesStandardGS48', 'VMSizeTypesStandardGS5', 'VMSizeTypesStandardGS516', 'VMSizeTypesStandardGS58', 'VMSizeTypesStandardH16', 'VMSizeTypesStandardH16m', 'VMSizeTypesStandardH16mr', 'VMSizeTypesStandardH16r', 'VMSizeTypesStandardH8', 'VMSizeTypesStandardH8m', 'VMSizeTypesStandardL16s', 'VMSizeTypesStandardL32s', 'VMSizeTypesStandardL4s', 'VMSizeTypesStandardL8s', 'VMSizeTypesStandardM12832ms', 'VMSizeTypesStandardM12864ms', 'VMSizeTypesStandardM128ms', 'VMSizeTypesStandardM128s', 'VMSizeTypesStandardM6416ms', 'VMSizeTypesStandardM6432ms', 'VMSizeTypesStandardM64ms', 'VMSizeTypesStandardM64s', 'VMSizeTypesStandardNC12', 'VMSizeTypesStandardNC12sV2', 'VMSizeTypesStandardNC12sV3', 'VMSizeTypesStandardNC24', 'VMSizeTypesStandardNC24r', 'VMSizeTypesStandardNC24rsV2', 'VMSizeTypesStandardNC24rsV3', 'VMSizeTypesStandardNC24sV2', 'VMSizeTypesStandardNC24sV3', 'VMSizeTypesStandardNC6', 'VMSizeTypesStandardNC6sV2', 'VMSizeTypesStandardNC6sV3', 'VMSizeTypesStandardND12s', 'VMSizeTypesStandardND24rs', 'VMSizeTypesStandardND24s', 'VMSizeTypesStandardND6s', 'VMSizeTypesStandardNV12', 'VMSizeTypesStandardNV24', 'VMSizeTypesStandardNV6'
+ VMSize VMSizeTypes `json:"vmSize,omitempty"`
+ // OsDiskSizeGB - OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
+ OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"`
+ // VnetSubnetID - VNet SubnetID specifies the VNet's subnet identifier.
+ VnetSubnetID *string `json:"vnetSubnetID,omitempty"`
+ // MaxPods - Maximum number of pods that can run on a node.
+ MaxPods *int32 `json:"maxPods,omitempty"`
+ // OsType - OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux. Possible values include: 'Linux', 'Windows'
+ OsType OSType `json:"osType,omitempty"`
+ // MaxCount - Maximum number of nodes for auto-scaling
+ MaxCount *int32 `json:"maxCount,omitempty"`
+ // MinCount - Minimum number of nodes for auto-scaling
+ MinCount *int32 `json:"minCount,omitempty"`
+ // EnableAutoScaling - Whether to enable auto-scaler
+ EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"`
+ // Type - AgentPoolType represents types of an agent pool. Possible values include: 'VirtualMachineScaleSets', 'AvailabilitySet'
+ Type AgentPoolType `json:"type,omitempty"`
+ // OrchestratorVersion - Version of orchestrator specified when creating the managed cluster.
+ OrchestratorVersion *string `json:"orchestratorVersion,omitempty"`
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // AvailabilityZones - (PREVIEW) Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType.
+ AvailabilityZones *[]string `json:"availabilityZones,omitempty"`
+ // EnableNodePublicIP - Enable public IP for nodes
+ EnableNodePublicIP *bool `json:"enableNodePublicIP,omitempty"`
+ // ScaleSetPriority - ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular. Possible values include: 'Low', 'Regular'
+ ScaleSetPriority ScaleSetPriority `json:"scaleSetPriority,omitempty"`
+ // ScaleSetEvictionPolicy - ScaleSetEvictionPolicy to be used to specify eviction policy for low priority virtual machine scale set. Default to Delete. Possible values include: 'Delete', 'Deallocate'
+ ScaleSetEvictionPolicy ScaleSetEvictionPolicy `json:"scaleSetEvictionPolicy,omitempty"`
+ // NodeTaints - Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
+ NodeTaints *[]string `json:"nodeTaints,omitempty"`
+}
+
+// ManagedClusterAPIServerAccessProfile access profile for managed cluster API server.
+type ManagedClusterAPIServerAccessProfile struct {
+ // AuthorizedIPRanges - Authorized IP Ranges to kubernetes API server.
+ AuthorizedIPRanges *[]string `json:"authorizedIPRanges,omitempty"`
+ // EnablePrivateCluster - Whether to create the cluster as a private cluster or not.
+ EnablePrivateCluster *bool `json:"enablePrivateCluster,omitempty"`
+}
+
+// ManagedClusterIdentity identity for the managed cluster.
+type ManagedClusterIdentity struct {
+ // PrincipalID - READ-ONLY; The principal id of the system assigned identity which is used by master components.
+ PrincipalID *string `json:"principalId,omitempty"`
+ // TenantID - READ-ONLY; The tenant id of the system assigned identity which is used by master components.
+ TenantID *string `json:"tenantId,omitempty"`
+ // Type - The type of identity used for the managed cluster. Type 'SystemAssigned' will use an implicitly created identity in master components and an auto-created user assigned identity in MC_ resource group in agent nodes. Type 'None' will not use MSI for the managed cluster, service principal will be used instead. Possible values include: 'SystemAssigned', 'None'
+ Type ResourceIdentityType `json:"type,omitempty"`
+}
+
+// ManagedClusterListResult the response from the List Managed Clusters operation.
+type ManagedClusterListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of managed clusters.
+ Value *[]ManagedCluster `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URL to get the next set of managed cluster results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ManagedClusterListResultIterator provides access to a complete listing of ManagedCluster values.
+type ManagedClusterListResultIterator struct {
+ i int
+ page ManagedClusterListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ManagedClusterListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ManagedClusterListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ManagedClusterListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ManagedClusterListResultIterator) Response() ManagedClusterListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ManagedClusterListResultIterator) Value() ManagedCluster {
+ if !iter.page.NotDone() {
+ return ManagedCluster{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewManagedClusterListResultIterator creates a new instance of the ManagedClusterListResultIterator type.
+func NewManagedClusterListResultIterator(page ManagedClusterListResultPage) ManagedClusterListResultIterator {
+ return ManagedClusterListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ManagedClusterListResult contains no values.
+func (mclr ManagedClusterListResult) IsEmpty() bool {
+ return mclr.Value == nil || len(*mclr.Value) == 0
+}
+
+// managedClusterListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (mclr ManagedClusterListResult) managedClusterListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if mclr.NextLink == nil || len(to.String(mclr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(mclr.NextLink)))
+}
+
+// ManagedClusterListResultPage contains a page of ManagedCluster values.
+type ManagedClusterListResultPage struct {
+ fn func(context.Context, ManagedClusterListResult) (ManagedClusterListResult, error)
+ mclr ManagedClusterListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ManagedClusterListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.mclr)
+ if err != nil {
+ return err
+ }
+ page.mclr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ManagedClusterListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ManagedClusterListResultPage) NotDone() bool {
+ return !page.mclr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ManagedClusterListResultPage) Response() ManagedClusterListResult {
+ return page.mclr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ManagedClusterListResultPage) Values() []ManagedCluster {
+ if page.mclr.IsEmpty() {
+ return nil
+ }
+ return *page.mclr.Value
+}
+
+// NewManagedClusterListResultPage creates a new instance of the ManagedClusterListResultPage type.
+func NewManagedClusterListResultPage(getNextPage func(context.Context, ManagedClusterListResult) (ManagedClusterListResult, error)) ManagedClusterListResultPage {
+ return ManagedClusterListResultPage{fn: getNextPage}
+}
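
The page/iterator pair above follows the SDK's usual paging pattern: check NotDone, read Values, then call NextWithContext to follow nextLink. A small sketch that drains every page into a slice; allManagedClusters is a hypothetical helper, the initial page would typically come from a ManagedClustersClient List call defined elsewhere in this package, and "context" plus the containerservice import from the earlier sketch are assumed:

// allManagedClusters walks every page, collecting the values as it goes.
func allManagedClusters(ctx context.Context, page containerservice.ManagedClusterListResultPage) ([]containerservice.ManagedCluster, error) {
	var all []containerservice.ManagedCluster
	for page.NotDone() {
		all = append(all, page.Values()...)
		// NextWithContext follows nextLink; it leaves the page unchanged on error.
		if err := page.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return all, nil
}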
+
+// ManagedClusterLoadBalancerProfile profile of the managed cluster load balancer
+type ManagedClusterLoadBalancerProfile struct {
+ // ManagedOutboundIPs - Desired managed outbound IPs for the cluster load balancer.
+ ManagedOutboundIPs *ManagedClusterLoadBalancerProfileManagedOutboundIPs `json:"managedOutboundIPs,omitempty"`
+ // OutboundIPPrefixes - Desired outbound IP Prefix resources for the cluster load balancer.
+ OutboundIPPrefixes *ManagedClusterLoadBalancerProfileOutboundIPPrefixes `json:"outboundIPPrefixes,omitempty"`
+ // OutboundIPs - Desired outbound IP resources for the cluster load balancer.
+ OutboundIPs *ManagedClusterLoadBalancerProfileOutboundIPs `json:"outboundIPs,omitempty"`
+ // EffectiveOutboundIPs - The effective outbound IP resources of the cluster load balancer.
+ EffectiveOutboundIPs *[]ResourceReference `json:"effectiveOutboundIPs,omitempty"`
+}
+
+// ManagedClusterLoadBalancerProfileManagedOutboundIPs desired managed outbound IPs for the cluster load
+// balancer.
+type ManagedClusterLoadBalancerProfileManagedOutboundIPs struct {
+	// Count - Desired number of outbound IPs created/managed by Azure for the cluster load balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
+ Count *int32 `json:"count,omitempty"`
+}
+
+// ManagedClusterLoadBalancerProfileOutboundIPPrefixes desired outbound IP Prefix resources for the cluster
+// load balancer.
+type ManagedClusterLoadBalancerProfileOutboundIPPrefixes struct {
+ // PublicIPPrefixes - A list of public IP prefix resources.
+ PublicIPPrefixes *[]ResourceReference `json:"publicIPPrefixes,omitempty"`
+}
+
+// ManagedClusterLoadBalancerProfileOutboundIPs desired outbound IP resources for the cluster load
+// balancer.
+type ManagedClusterLoadBalancerProfileOutboundIPs struct {
+ // PublicIPs - A list of public IP resources.
+ PublicIPs *[]ResourceReference `json:"publicIPs,omitempty"`
+}
+
+// ManagedClusterPoolUpgradeProfile the list of available upgrade versions.
+type ManagedClusterPoolUpgradeProfile struct {
+ // KubernetesVersion - Kubernetes version (major, minor, patch).
+ KubernetesVersion *string `json:"kubernetesVersion,omitempty"`
+ // Name - Pool name.
+ Name *string `json:"name,omitempty"`
+ // OsType - OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux. Possible values include: 'Linux', 'Windows'
+ OsType OSType `json:"osType,omitempty"`
+ // Upgrades - List of orchestrator types and versions available for upgrade.
+ Upgrades *[]ManagedClusterPoolUpgradeProfileUpgradesItem `json:"upgrades,omitempty"`
+}
+
+// ManagedClusterPoolUpgradeProfileUpgradesItem ...
+type ManagedClusterPoolUpgradeProfileUpgradesItem struct {
+ // KubernetesVersion - Kubernetes version (major, minor, patch).
+ KubernetesVersion *string `json:"kubernetesVersion,omitempty"`
+ // IsPreview - Whether Kubernetes version is currently in preview.
+ IsPreview *bool `json:"isPreview,omitempty"`
+}
+
+// ManagedClusterProperties properties of the managed cluster.
+type ManagedClusterProperties struct {
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // MaxAgentPools - READ-ONLY; The max number of agent pools for the managed cluster.
+ MaxAgentPools *int32 `json:"maxAgentPools,omitempty"`
+ // KubernetesVersion - Version of Kubernetes specified when creating the managed cluster.
+ KubernetesVersion *string `json:"kubernetesVersion,omitempty"`
+ // DNSPrefix - DNS prefix specified when creating the managed cluster.
+ DNSPrefix *string `json:"dnsPrefix,omitempty"`
+ // Fqdn - READ-ONLY; FQDN for the master pool.
+ Fqdn *string `json:"fqdn,omitempty"`
+ // AgentPoolProfiles - Properties of the agent pool.
+ AgentPoolProfiles *[]ManagedClusterAgentPoolProfile `json:"agentPoolProfiles,omitempty"`
+ // LinuxProfile - Profile for Linux VMs in the container service cluster.
+ LinuxProfile *LinuxProfile `json:"linuxProfile,omitempty"`
+ // WindowsProfile - Profile for Windows VMs in the container service cluster.
+ WindowsProfile *ManagedClusterWindowsProfile `json:"windowsProfile,omitempty"`
+ // ServicePrincipalProfile - Information about a service principal identity for the cluster to use for manipulating Azure APIs.
+ ServicePrincipalProfile *ManagedClusterServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"`
+ // AddonProfiles - Profile of managed cluster add-on.
+ AddonProfiles map[string]*ManagedClusterAddonProfile `json:"addonProfiles"`
+ // NodeResourceGroup - Name of the resource group containing agent pool nodes.
+ NodeResourceGroup *string `json:"nodeResourceGroup,omitempty"`
+ // EnableRBAC - Whether to enable Kubernetes Role-Based Access Control.
+ EnableRBAC *bool `json:"enableRBAC,omitempty"`
+ // EnablePodSecurityPolicy - (PREVIEW) Whether to enable Kubernetes Pod security policy.
+ EnablePodSecurityPolicy *bool `json:"enablePodSecurityPolicy,omitempty"`
+ // NetworkProfile - Profile of network configuration.
+ NetworkProfile *NetworkProfileType `json:"networkProfile,omitempty"`
+ // AadProfile - Profile of Azure Active Directory configuration.
+ AadProfile *ManagedClusterAADProfile `json:"aadProfile,omitempty"`
+ // APIServerAccessProfile - Access profile for managed cluster API server.
+ APIServerAccessProfile *ManagedClusterAPIServerAccessProfile `json:"apiServerAccessProfile,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ManagedClusterProperties.
+func (mcp ManagedClusterProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mcp.KubernetesVersion != nil {
+ objectMap["kubernetesVersion"] = mcp.KubernetesVersion
+ }
+ if mcp.DNSPrefix != nil {
+ objectMap["dnsPrefix"] = mcp.DNSPrefix
+ }
+ if mcp.AgentPoolProfiles != nil {
+ objectMap["agentPoolProfiles"] = mcp.AgentPoolProfiles
+ }
+ if mcp.LinuxProfile != nil {
+ objectMap["linuxProfile"] = mcp.LinuxProfile
+ }
+ if mcp.WindowsProfile != nil {
+ objectMap["windowsProfile"] = mcp.WindowsProfile
+ }
+ if mcp.ServicePrincipalProfile != nil {
+ objectMap["servicePrincipalProfile"] = mcp.ServicePrincipalProfile
+ }
+ if mcp.AddonProfiles != nil {
+ objectMap["addonProfiles"] = mcp.AddonProfiles
+ }
+ if mcp.NodeResourceGroup != nil {
+ objectMap["nodeResourceGroup"] = mcp.NodeResourceGroup
+ }
+ if mcp.EnableRBAC != nil {
+ objectMap["enableRBAC"] = mcp.EnableRBAC
+ }
+ if mcp.EnablePodSecurityPolicy != nil {
+ objectMap["enablePodSecurityPolicy"] = mcp.EnablePodSecurityPolicy
+ }
+ if mcp.NetworkProfile != nil {
+ objectMap["networkProfile"] = mcp.NetworkProfile
+ }
+ if mcp.AadProfile != nil {
+ objectMap["aadProfile"] = mcp.AadProfile
+ }
+ if mcp.APIServerAccessProfile != nil {
+ objectMap["apiServerAccessProfile"] = mcp.APIServerAccessProfile
+ }
+ return json.Marshal(objectMap)
+}
+
+// ManagedClustersCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ManagedClustersCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ManagedClustersCreateOrUpdateFuture) Result(client ManagedClustersClient) (mc ManagedCluster, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if mc.Response.Response, err = future.GetResult(sender); err == nil && mc.Response.Response.StatusCode != http.StatusNoContent {
+ mc, err = client.CreateOrUpdateResponder(mc.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersCreateOrUpdateFuture", "Result", mc.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
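
Result is meant to be called once the future reports completion. A hedged sketch of the usual long-running-operation flow; createCluster is a hypothetical helper and the ManagedClustersClient.CreateOrUpdate signature is assumed from the generated client, which is not part of this hunk:

// createCluster starts the LRO, blocks until it finishes, then fetches the final resource.
func createCluster(ctx context.Context, client containerservice.ManagedClustersClient, resourceGroup, name string, parameters containerservice.ManagedCluster) (containerservice.ManagedCluster, error) {
	// CreateOrUpdate returns a ManagedClustersCreateOrUpdateFuture immediately. (assumed signature)
	future, err := client.CreateOrUpdate(ctx, resourceGroup, name, parameters)
	if err != nil {
		return containerservice.ManagedCluster{}, err
	}
	// WaitForCompletionRef polls until the operation reaches a terminal state.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return containerservice.ManagedCluster{}, err
	}
	// Result decodes the final ManagedCluster from the stored response.
	return future.Result(client)
}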
+
+// ManagedClustersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type ManagedClustersDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ManagedClustersDeleteFuture) Result(client ManagedClustersClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// ManagedClusterServicePrincipalProfile information about a service principal identity for the cluster to
+// use for manipulating Azure APIs.
+type ManagedClusterServicePrincipalProfile struct {
+ // ClientID - The ID for the service principal.
+ ClientID *string `json:"clientId,omitempty"`
+ // Secret - The secret password associated with the service principal in plain text.
+ Secret *string `json:"secret,omitempty"`
+}
+
+// ManagedClustersResetAADProfileFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ManagedClustersResetAADProfileFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ManagedClustersResetAADProfileFuture) Result(client ManagedClustersClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersResetAADProfileFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersResetAADProfileFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// ManagedClustersResetServicePrincipalProfileFuture an abstraction for monitoring and retrieving the
+// results of a long-running operation.
+type ManagedClustersResetServicePrincipalProfileFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ManagedClustersResetServicePrincipalProfileFuture) Result(client ManagedClustersClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersResetServicePrincipalProfileFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersResetServicePrincipalProfileFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// ManagedClustersUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ManagedClustersUpdateTagsFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ManagedClustersUpdateTagsFuture) Result(client ManagedClustersClient) (mc ManagedCluster, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersUpdateTagsFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersUpdateTagsFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if mc.Response.Response, err = future.GetResult(sender); err == nil && mc.Response.Response.StatusCode != http.StatusNoContent {
+ mc, err = client.UpdateTagsResponder(mc.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersUpdateTagsFuture", "Result", mc.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// ManagedClusterUpgradeProfile the list of available upgrades for compute pools.
+type ManagedClusterUpgradeProfile struct {
+ autorest.Response `json:"-"`
+ // ID - READ-ONLY; Id of upgrade profile.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Name of upgrade profile.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Type of upgrade profile.
+ Type *string `json:"type,omitempty"`
+ // ManagedClusterUpgradeProfileProperties - Properties of upgrade profile.
+ *ManagedClusterUpgradeProfileProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ManagedClusterUpgradeProfile.
+func (mcup ManagedClusterUpgradeProfile) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mcup.ManagedClusterUpgradeProfileProperties != nil {
+ objectMap["properties"] = mcup.ManagedClusterUpgradeProfileProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ManagedClusterUpgradeProfile struct.
+func (mcup *ManagedClusterUpgradeProfile) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ mcup.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mcup.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ mcup.Type = &typeVar
+ }
+ case "properties":
+ if v != nil {
+ var managedClusterUpgradeProfileProperties ManagedClusterUpgradeProfileProperties
+ err = json.Unmarshal(*v, &managedClusterUpgradeProfileProperties)
+ if err != nil {
+ return err
+ }
+ mcup.ManagedClusterUpgradeProfileProperties = &managedClusterUpgradeProfileProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// ManagedClusterUpgradeProfileProperties control plane and agent pool upgrade profiles.
+type ManagedClusterUpgradeProfileProperties struct {
+ // ControlPlaneProfile - The list of available upgrade versions for the control plane.
+ ControlPlaneProfile *ManagedClusterPoolUpgradeProfile `json:"controlPlaneProfile,omitempty"`
+ // AgentPoolProfiles - The list of available upgrade versions for agent pools.
+ AgentPoolProfiles *[]ManagedClusterPoolUpgradeProfile `json:"agentPoolProfiles,omitempty"`
+}
+
+// ManagedClusterWindowsProfile profile for Windows VMs in the container service cluster.
+type ManagedClusterWindowsProfile struct {
+ // AdminUsername - The administrator username to use for Windows VMs.
+ AdminUsername *string `json:"adminUsername,omitempty"`
+ // AdminPassword - The administrator password to use for Windows VMs.
+ AdminPassword *string `json:"adminPassword,omitempty"`
+}
+
+// MasterProfile profile for the container service master.
+type MasterProfile struct {
+ // Count - Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5. The default value is 1.
+ Count *int32 `json:"count,omitempty"`
+ // DNSPrefix - DNS prefix to be used to create the FQDN for the master pool.
+ DNSPrefix *string `json:"dnsPrefix,omitempty"`
+ // VMSize - Size of agent VMs. Possible values include: 'VMSizeTypesStandardA1', 'VMSizeTypesStandardA10', 'VMSizeTypesStandardA11', 'VMSizeTypesStandardA1V2', 'VMSizeTypesStandardA2', 'VMSizeTypesStandardA2V2', 'VMSizeTypesStandardA2mV2', 'VMSizeTypesStandardA3', 'VMSizeTypesStandardA4', 'VMSizeTypesStandardA4V2', 'VMSizeTypesStandardA4mV2', 'VMSizeTypesStandardA5', 'VMSizeTypesStandardA6', 'VMSizeTypesStandardA7', 'VMSizeTypesStandardA8', 'VMSizeTypesStandardA8V2', 'VMSizeTypesStandardA8mV2', 'VMSizeTypesStandardA9', 'VMSizeTypesStandardB2ms', 'VMSizeTypesStandardB2s', 'VMSizeTypesStandardB4ms', 'VMSizeTypesStandardB8ms', 'VMSizeTypesStandardD1', 'VMSizeTypesStandardD11', 'VMSizeTypesStandardD11V2', 'VMSizeTypesStandardD11V2Promo', 'VMSizeTypesStandardD12', 'VMSizeTypesStandardD12V2', 'VMSizeTypesStandardD12V2Promo', 'VMSizeTypesStandardD13', 'VMSizeTypesStandardD13V2', 'VMSizeTypesStandardD13V2Promo', 'VMSizeTypesStandardD14', 'VMSizeTypesStandardD14V2', 'VMSizeTypesStandardD14V2Promo', 'VMSizeTypesStandardD15V2', 'VMSizeTypesStandardD16V3', 'VMSizeTypesStandardD16sV3', 'VMSizeTypesStandardD1V2', 'VMSizeTypesStandardD2', 'VMSizeTypesStandardD2V2', 'VMSizeTypesStandardD2V2Promo', 'VMSizeTypesStandardD2V3', 'VMSizeTypesStandardD2sV3', 'VMSizeTypesStandardD3', 'VMSizeTypesStandardD32V3', 'VMSizeTypesStandardD32sV3', 'VMSizeTypesStandardD3V2', 'VMSizeTypesStandardD3V2Promo', 'VMSizeTypesStandardD4', 'VMSizeTypesStandardD4V2', 'VMSizeTypesStandardD4V2Promo', 'VMSizeTypesStandardD4V3', 'VMSizeTypesStandardD4sV3', 'VMSizeTypesStandardD5V2', 'VMSizeTypesStandardD5V2Promo', 'VMSizeTypesStandardD64V3', 'VMSizeTypesStandardD64sV3', 'VMSizeTypesStandardD8V3', 'VMSizeTypesStandardD8sV3', 'VMSizeTypesStandardDS1', 'VMSizeTypesStandardDS11', 'VMSizeTypesStandardDS11V2', 'VMSizeTypesStandardDS11V2Promo', 'VMSizeTypesStandardDS12', 'VMSizeTypesStandardDS12V2', 'VMSizeTypesStandardDS12V2Promo', 'VMSizeTypesStandardDS13', 'VMSizeTypesStandardDS132V2', 'VMSizeTypesStandardDS134V2', 'VMSizeTypesStandardDS13V2', 'VMSizeTypesStandardDS13V2Promo', 'VMSizeTypesStandardDS14', 'VMSizeTypesStandardDS144V2', 'VMSizeTypesStandardDS148V2', 'VMSizeTypesStandardDS14V2', 'VMSizeTypesStandardDS14V2Promo', 'VMSizeTypesStandardDS15V2', 'VMSizeTypesStandardDS1V2', 'VMSizeTypesStandardDS2', 'VMSizeTypesStandardDS2V2', 'VMSizeTypesStandardDS2V2Promo', 'VMSizeTypesStandardDS3', 'VMSizeTypesStandardDS3V2', 'VMSizeTypesStandardDS3V2Promo', 'VMSizeTypesStandardDS4', 'VMSizeTypesStandardDS4V2', 'VMSizeTypesStandardDS4V2Promo', 'VMSizeTypesStandardDS5V2', 'VMSizeTypesStandardDS5V2Promo', 'VMSizeTypesStandardE16V3', 'VMSizeTypesStandardE16sV3', 'VMSizeTypesStandardE2V3', 'VMSizeTypesStandardE2sV3', 'VMSizeTypesStandardE3216sV3', 'VMSizeTypesStandardE328sV3', 'VMSizeTypesStandardE32V3', 'VMSizeTypesStandardE32sV3', 'VMSizeTypesStandardE4V3', 'VMSizeTypesStandardE4sV3', 'VMSizeTypesStandardE6416sV3', 'VMSizeTypesStandardE6432sV3', 'VMSizeTypesStandardE64V3', 'VMSizeTypesStandardE64sV3', 'VMSizeTypesStandardE8V3', 'VMSizeTypesStandardE8sV3', 'VMSizeTypesStandardF1', 'VMSizeTypesStandardF16', 'VMSizeTypesStandardF16s', 'VMSizeTypesStandardF16sV2', 'VMSizeTypesStandardF1s', 'VMSizeTypesStandardF2', 'VMSizeTypesStandardF2s', 'VMSizeTypesStandardF2sV2', 'VMSizeTypesStandardF32sV2', 'VMSizeTypesStandardF4', 'VMSizeTypesStandardF4s', 'VMSizeTypesStandardF4sV2', 'VMSizeTypesStandardF64sV2', 'VMSizeTypesStandardF72sV2', 'VMSizeTypesStandardF8', 'VMSizeTypesStandardF8s', 'VMSizeTypesStandardF8sV2', 'VMSizeTypesStandardG1', 
+	// 'VMSizeTypesStandardG2', 'VMSizeTypesStandardG3', 'VMSizeTypesStandardG4', 'VMSizeTypesStandardG5', 'VMSizeTypesStandardGS1', 'VMSizeTypesStandardGS2', 'VMSizeTypesStandardGS3', 'VMSizeTypesStandardGS4', 'VMSizeTypesStandardGS44', 'VMSizeTypesStandardGS48', 'VMSizeTypesStandardGS5', 'VMSizeTypesStandardGS516', 'VMSizeTypesStandardGS58', 'VMSizeTypesStandardH16', 'VMSizeTypesStandardH16m', 'VMSizeTypesStandardH16mr', 'VMSizeTypesStandardH16r', 'VMSizeTypesStandardH8', 'VMSizeTypesStandardH8m', 'VMSizeTypesStandardL16s', 'VMSizeTypesStandardL32s', 'VMSizeTypesStandardL4s', 'VMSizeTypesStandardL8s', 'VMSizeTypesStandardM12832ms', 'VMSizeTypesStandardM12864ms', 'VMSizeTypesStandardM128ms', 'VMSizeTypesStandardM128s', 'VMSizeTypesStandardM6416ms', 'VMSizeTypesStandardM6432ms', 'VMSizeTypesStandardM64ms', 'VMSizeTypesStandardM64s', 'VMSizeTypesStandardNC12', 'VMSizeTypesStandardNC12sV2', 'VMSizeTypesStandardNC12sV3', 'VMSizeTypesStandardNC24', 'VMSizeTypesStandardNC24r', 'VMSizeTypesStandardNC24rsV2', 'VMSizeTypesStandardNC24rsV3', 'VMSizeTypesStandardNC24sV2', 'VMSizeTypesStandardNC24sV3', 'VMSizeTypesStandardNC6', 'VMSizeTypesStandardNC6sV2', 'VMSizeTypesStandardNC6sV3', 'VMSizeTypesStandardND12s', 'VMSizeTypesStandardND24rs', 'VMSizeTypesStandardND24s', 'VMSizeTypesStandardND6s', 'VMSizeTypesStandardNV12', 'VMSizeTypesStandardNV24', 'VMSizeTypesStandardNV6'
+ VMSize VMSizeTypes `json:"vmSize,omitempty"`
+ // OsDiskSizeGB - OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
+ OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"`
+ // VnetSubnetID - VNet SubnetID specifies the VNet's subnet identifier.
+ VnetSubnetID *string `json:"vnetSubnetID,omitempty"`
+ // FirstConsecutiveStaticIP - FirstConsecutiveStaticIP used to specify the first static ip of masters.
+ FirstConsecutiveStaticIP *string `json:"firstConsecutiveStaticIP,omitempty"`
+	// StorageProfile - Storage profile specifies what kind of storage is used. Choose from StorageAccount and ManagedDisks. Leave it empty and we will choose for you based on the orchestrator choice. Possible values include: 'StorageAccount', 'ManagedDisks'
+ StorageProfile StorageProfileTypes `json:"storageProfile,omitempty"`
+ // Fqdn - READ-ONLY; FQDN for the master pool.
+ Fqdn *string `json:"fqdn,omitempty"`
+}
+
+// NetworkProfile represents the OpenShift networking configuration.
+type NetworkProfile struct {
+ // VnetCidr - CIDR for the OpenShift Vnet.
+ VnetCidr *string `json:"vnetCidr,omitempty"`
+ // PeerVnetID - CIDR of the Vnet to peer.
+ PeerVnetID *string `json:"peerVnetId,omitempty"`
+ // VnetID - ID of the Vnet created for OSA cluster.
+ VnetID *string `json:"vnetId,omitempty"`
+}
+
+// NetworkProfileType profile of network configuration.
+type NetworkProfileType struct {
+ // NetworkPlugin - Network plugin used for building Kubernetes network. Possible values include: 'Azure', 'Kubenet'
+ NetworkPlugin NetworkPlugin `json:"networkPlugin,omitempty"`
+ // NetworkPolicy - Network policy used for building Kubernetes network. Possible values include: 'NetworkPolicyCalico', 'NetworkPolicyAzure'
+ NetworkPolicy NetworkPolicy `json:"networkPolicy,omitempty"`
+ // PodCidr - A CIDR notation IP range from which to assign pod IPs when kubenet is used.
+ PodCidr *string `json:"podCidr,omitempty"`
+ // ServiceCidr - A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.
+ ServiceCidr *string `json:"serviceCidr,omitempty"`
+ // DNSServiceIP - An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr.
+ DNSServiceIP *string `json:"dnsServiceIP,omitempty"`
+ // DockerBridgeCidr - A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
+ DockerBridgeCidr *string `json:"dockerBridgeCidr,omitempty"`
+ // LoadBalancerSku - The load balancer sku for the managed cluster. Possible values include: 'Standard', 'Basic'
+ LoadBalancerSku LoadBalancerSku `json:"loadBalancerSku,omitempty"`
+ // LoadBalancerProfile - Profile of the cluster load balancer.
+ LoadBalancerProfile *ManagedClusterLoadBalancerProfile `json:"loadBalancerProfile,omitempty"`
+}
+
+// OpenShiftManagedCluster OpenShift managed cluster.
+type OpenShiftManagedCluster struct {
+ autorest.Response `json:"-"`
+ // Plan - Define the resource plan as required by ARM for billing purposes
+ Plan *PurchasePlan `json:"plan,omitempty"`
+	// OpenShiftManagedClusterProperties - Properties of an OpenShift managed cluster.
+ *OpenShiftManagedClusterProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for OpenShiftManagedCluster.
+func (osmc OpenShiftManagedCluster) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if osmc.Plan != nil {
+ objectMap["plan"] = osmc.Plan
+ }
+ if osmc.OpenShiftManagedClusterProperties != nil {
+ objectMap["properties"] = osmc.OpenShiftManagedClusterProperties
+ }
+ if osmc.Location != nil {
+ objectMap["location"] = osmc.Location
+ }
+ if osmc.Tags != nil {
+ objectMap["tags"] = osmc.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for OpenShiftManagedCluster struct.
+func (osmc *OpenShiftManagedCluster) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "plan":
+ if v != nil {
+ var plan PurchasePlan
+ err = json.Unmarshal(*v, &plan)
+ if err != nil {
+ return err
+ }
+ osmc.Plan = &plan
+ }
+ case "properties":
+ if v != nil {
+ var openShiftManagedClusterProperties OpenShiftManagedClusterProperties
+ err = json.Unmarshal(*v, &openShiftManagedClusterProperties)
+ if err != nil {
+ return err
+ }
+ osmc.OpenShiftManagedClusterProperties = &openShiftManagedClusterProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ osmc.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ osmc.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ osmc.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ osmc.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ osmc.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// OpenShiftManagedClusterAADIdentityProvider defines the Identity provider for MS AAD.
+type OpenShiftManagedClusterAADIdentityProvider struct {
+	// ClientID - The clientId associated with the provider.
+ ClientID *string `json:"clientId,omitempty"`
+ // Secret - The secret password associated with the provider.
+ Secret *string `json:"secret,omitempty"`
+ // TenantID - The tenantId associated with the provider.
+ TenantID *string `json:"tenantId,omitempty"`
+ // CustomerAdminGroupID - The groupId to be granted cluster admin role.
+ CustomerAdminGroupID *string `json:"customerAdminGroupId,omitempty"`
+ // Kind - Possible values include: 'KindOpenShiftManagedClusterBaseIdentityProvider', 'KindAADIdentityProvider'
+ Kind Kind `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for OpenShiftManagedClusterAADIdentityProvider.
+func (osmcaip OpenShiftManagedClusterAADIdentityProvider) MarshalJSON() ([]byte, error) {
+ osmcaip.Kind = KindAADIdentityProvider
+ objectMap := make(map[string]interface{})
+ if osmcaip.ClientID != nil {
+ objectMap["clientId"] = osmcaip.ClientID
+ }
+ if osmcaip.Secret != nil {
+ objectMap["secret"] = osmcaip.Secret
+ }
+ if osmcaip.TenantID != nil {
+ objectMap["tenantId"] = osmcaip.TenantID
+ }
+ if osmcaip.CustomerAdminGroupID != nil {
+ objectMap["customerAdminGroupId"] = osmcaip.CustomerAdminGroupID
+ }
+ if osmcaip.Kind != "" {
+ objectMap["kind"] = osmcaip.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsOpenShiftManagedClusterAADIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterAADIdentityProvider.
+func (osmcaip OpenShiftManagedClusterAADIdentityProvider) AsOpenShiftManagedClusterAADIdentityProvider() (*OpenShiftManagedClusterAADIdentityProvider, bool) {
+ return &osmcaip, true
+}
+
+// AsOpenShiftManagedClusterBaseIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterAADIdentityProvider.
+func (osmcaip OpenShiftManagedClusterAADIdentityProvider) AsOpenShiftManagedClusterBaseIdentityProvider() (*OpenShiftManagedClusterBaseIdentityProvider, bool) {
+ return nil, false
+}
+
+// AsBasicOpenShiftManagedClusterBaseIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterAADIdentityProvider.
+func (osmcaip OpenShiftManagedClusterAADIdentityProvider) AsBasicOpenShiftManagedClusterBaseIdentityProvider() (BasicOpenShiftManagedClusterBaseIdentityProvider, bool) {
+ return &osmcaip, true
+}
+
+// OpenShiftManagedClusterAgentPoolProfile defines the configuration of the OpenShift cluster VMs.
+type OpenShiftManagedClusterAgentPoolProfile struct {
+ // Name - Unique name of the pool profile in the context of the subscription and resource group.
+ Name *string `json:"name,omitempty"`
+ // Count - Number of agents (VMs) to host docker containers.
+ Count *int32 `json:"count,omitempty"`
+ // VMSize - Size of agent VMs. Possible values include: 'StandardD2sV3', 'StandardD4sV3', 'StandardD8sV3', 'StandardD16sV3', 'StandardD32sV3', 'StandardD64sV3', 'StandardDS4V2', 'StandardDS5V2', 'StandardF8sV2', 'StandardF16sV2', 'StandardF32sV2', 'StandardF64sV2', 'StandardF72sV2', 'StandardF8s', 'StandardF16s', 'StandardE4sV3', 'StandardE8sV3', 'StandardE16sV3', 'StandardE20sV3', 'StandardE32sV3', 'StandardE64sV3', 'StandardGS2', 'StandardGS3', 'StandardGS4', 'StandardGS5', 'StandardDS12V2', 'StandardDS13V2', 'StandardDS14V2', 'StandardDS15V2', 'StandardL4s', 'StandardL8s', 'StandardL16s', 'StandardL32s'
+ VMSize OpenShiftContainerServiceVMSize `json:"vmSize,omitempty"`
+ // SubnetCidr - Subnet CIDR for the peering.
+ SubnetCidr *string `json:"subnetCidr,omitempty"`
+ // OsType - OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux. Possible values include: 'Linux', 'Windows'
+ OsType OSType `json:"osType,omitempty"`
+ // Role - Define the role of the AgentPoolProfile. Possible values include: 'Compute', 'Infra'
+ Role OpenShiftAgentPoolProfileRole `json:"role,omitempty"`
+}
+
+// OpenShiftManagedClusterAuthProfile defines all possible authentication profiles for the OpenShift
+// cluster.
+type OpenShiftManagedClusterAuthProfile struct {
+ // IdentityProviders - Type of authentication profile to use.
+ IdentityProviders *[]OpenShiftManagedClusterIdentityProvider `json:"identityProviders,omitempty"`
+}
+
+// BasicOpenShiftManagedClusterBaseIdentityProvider structure for any Identity provider.
+type BasicOpenShiftManagedClusterBaseIdentityProvider interface {
+ AsOpenShiftManagedClusterAADIdentityProvider() (*OpenShiftManagedClusterAADIdentityProvider, bool)
+ AsOpenShiftManagedClusterBaseIdentityProvider() (*OpenShiftManagedClusterBaseIdentityProvider, bool)
+}
+
+// OpenShiftManagedClusterBaseIdentityProvider structure for any Identity provider.
+type OpenShiftManagedClusterBaseIdentityProvider struct {
+ // Kind - Possible values include: 'KindOpenShiftManagedClusterBaseIdentityProvider', 'KindAADIdentityProvider'
+ Kind Kind `json:"kind,omitempty"`
+}
+
+func unmarshalBasicOpenShiftManagedClusterBaseIdentityProvider(body []byte) (BasicOpenShiftManagedClusterBaseIdentityProvider, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["kind"] {
+ case string(KindAADIdentityProvider):
+ var osmcaip OpenShiftManagedClusterAADIdentityProvider
+ err := json.Unmarshal(body, &osmcaip)
+ return osmcaip, err
+ default:
+ var osmcbip OpenShiftManagedClusterBaseIdentityProvider
+ err := json.Unmarshal(body, &osmcbip)
+ return osmcbip, err
+ }
+}
+func unmarshalBasicOpenShiftManagedClusterBaseIdentityProviderArray(body []byte) ([]BasicOpenShiftManagedClusterBaseIdentityProvider, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ osmcbipArray := make([]BasicOpenShiftManagedClusterBaseIdentityProvider, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ osmcbip, err := unmarshalBasicOpenShiftManagedClusterBaseIdentityProvider(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ osmcbipArray[index] = osmcbip
+ }
+ return osmcbipArray, nil
+}
+
+// MarshalJSON is the custom marshaler for OpenShiftManagedClusterBaseIdentityProvider.
+func (osmcbip OpenShiftManagedClusterBaseIdentityProvider) MarshalJSON() ([]byte, error) {
+ osmcbip.Kind = KindOpenShiftManagedClusterBaseIdentityProvider
+ objectMap := make(map[string]interface{})
+ if osmcbip.Kind != "" {
+ objectMap["kind"] = osmcbip.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsOpenShiftManagedClusterAADIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterBaseIdentityProvider.
+func (osmcbip OpenShiftManagedClusterBaseIdentityProvider) AsOpenShiftManagedClusterAADIdentityProvider() (*OpenShiftManagedClusterAADIdentityProvider, bool) {
+ return nil, false
+}
+
+// AsOpenShiftManagedClusterBaseIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterBaseIdentityProvider.
+func (osmcbip OpenShiftManagedClusterBaseIdentityProvider) AsOpenShiftManagedClusterBaseIdentityProvider() (*OpenShiftManagedClusterBaseIdentityProvider, bool) {
+ return &osmcbip, true
+}
+
+// AsBasicOpenShiftManagedClusterBaseIdentityProvider is the BasicOpenShiftManagedClusterBaseIdentityProvider implementation for OpenShiftManagedClusterBaseIdentityProvider.
+func (osmcbip OpenShiftManagedClusterBaseIdentityProvider) AsBasicOpenShiftManagedClusterBaseIdentityProvider() (BasicOpenShiftManagedClusterBaseIdentityProvider, bool) {
+ return &osmcbip, true
+}
+
+// OpenShiftManagedClusterIdentityProvider defines the configuration of the identity providers to be used
+// in the OpenShift cluster.
+type OpenShiftManagedClusterIdentityProvider struct {
+ // Name - Name of the provider.
+ Name *string `json:"name,omitempty"`
+ // Provider - Configuration of the provider.
+ Provider BasicOpenShiftManagedClusterBaseIdentityProvider `json:"provider,omitempty"`
+}
+
+// UnmarshalJSON is the custom unmarshaler for OpenShiftManagedClusterIdentityProvider struct.
+func (osmcip *OpenShiftManagedClusterIdentityProvider) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ osmcip.Name = &name
+ }
+ case "provider":
+ if v != nil {
+ provider, err := unmarshalBasicOpenShiftManagedClusterBaseIdentityProvider(*v)
+ if err != nil {
+ return err
+ }
+ osmcip.Provider = provider
+ }
+ }
+ }
+
+ return nil
+}
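The Kind discriminator plus the custom (un)marshalers above are what make the polymorphic provider field round-trip through JSON. The sketch below is illustrative only: it assumes the package import path services/containerservice/mgmt/2019-08-01/containerservice, uses placeholder secret and tenant values, and relies on the AAD marshaler stamping kind=AADIdentityProvider as the generated pattern suggests.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Provider holds the AAD implementation of the polymorphic
	// BasicOpenShiftManagedClusterBaseIdentityProvider interface.
	idp := containerservice.OpenShiftManagedClusterIdentityProvider{
		Name: to.StringPtr("Azure AD"),
		Provider: containerservice.OpenShiftManagedClusterAADIdentityProvider{
			Secret:   to.StringPtr("<placeholder-secret>"),
			TenantID: to.StringPtr("<placeholder-tenant-id>"),
		},
	}

	// MarshalJSON on the AAD provider writes the kind discriminator that
	// unmarshalBasicOpenShiftManagedClusterBaseIdentityProvider switches on.
	body, err := json.Marshal(idp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))

	// UnmarshalJSON on OpenShiftManagedClusterIdentityProvider restores the
	// concrete AAD type behind the interface.
	var decoded containerservice.OpenShiftManagedClusterIdentityProvider
	if err := json.Unmarshal(body, &decoded); err != nil {
		panic(err)
	}
	_, isAAD := decoded.Provider.(containerservice.OpenShiftManagedClusterAADIdentityProvider)
	fmt.Println("provider is AAD:", isAAD)
}
```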
+
+// OpenShiftManagedClusterListResult the response from the List OpenShift Managed Clusters operation.
+type OpenShiftManagedClusterListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of OpenShift managed clusters.
+ Value *[]OpenShiftManagedCluster `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URL to get the next set of OpenShift managed cluster results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// OpenShiftManagedClusterListResultIterator provides access to a complete listing of
+// OpenShiftManagedCluster values.
+type OpenShiftManagedClusterListResultIterator struct {
+ i int
+ page OpenShiftManagedClusterListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *OpenShiftManagedClusterListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClusterListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *OpenShiftManagedClusterListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter OpenShiftManagedClusterListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter OpenShiftManagedClusterListResultIterator) Response() OpenShiftManagedClusterListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter OpenShiftManagedClusterListResultIterator) Value() OpenShiftManagedCluster {
+ if !iter.page.NotDone() {
+ return OpenShiftManagedCluster{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the OpenShiftManagedClusterListResultIterator type.
+func NewOpenShiftManagedClusterListResultIterator(page OpenShiftManagedClusterListResultPage) OpenShiftManagedClusterListResultIterator {
+ return OpenShiftManagedClusterListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (osmclr OpenShiftManagedClusterListResult) IsEmpty() bool {
+ return osmclr.Value == nil || len(*osmclr.Value) == 0
+}
+
+// openShiftManagedClusterListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (osmclr OpenShiftManagedClusterListResult) openShiftManagedClusterListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if osmclr.NextLink == nil || len(to.String(osmclr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(osmclr.NextLink)))
+}
+
+// OpenShiftManagedClusterListResultPage contains a page of OpenShiftManagedCluster values.
+type OpenShiftManagedClusterListResultPage struct {
+ fn func(context.Context, OpenShiftManagedClusterListResult) (OpenShiftManagedClusterListResult, error)
+ osmclr OpenShiftManagedClusterListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *OpenShiftManagedClusterListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClusterListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.osmclr)
+ if err != nil {
+ return err
+ }
+ page.osmclr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *OpenShiftManagedClusterListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page OpenShiftManagedClusterListResultPage) NotDone() bool {
+ return !page.osmclr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page OpenShiftManagedClusterListResultPage) Response() OpenShiftManagedClusterListResult {
+ return page.osmclr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page OpenShiftManagedClusterListResultPage) Values() []OpenShiftManagedCluster {
+ if page.osmclr.IsEmpty() {
+ return nil
+ }
+ return *page.osmclr.Value
+}
+
+// Creates a new instance of the OpenShiftManagedClusterListResultPage type.
+func NewOpenShiftManagedClusterListResultPage(getNextPage func(context.Context, OpenShiftManagedClusterListResult) (OpenShiftManagedClusterListResult, error)) OpenShiftManagedClusterListResultPage {
+ return OpenShiftManagedClusterListResultPage{fn: getNextPage}
+}
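Taken together, the page type follows NextLink on the server side while the iterator flattens pages into single values. Below is a minimal page-walking sketch, assuming an already-authorized OpenShiftManagedClustersClient; the package name and helper name are hypothetical.

```go
package clusterops

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
)

// printClusterNames walks every page of the list result and prints each
// cluster name. client must already carry a valid Authorizer.
func printClusterNames(ctx context.Context, client containerservice.OpenShiftManagedClustersClient) error {
	page, err := client.List(ctx)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, osmc := range page.Values() {
			if osmc.Name != nil {
				fmt.Println(*osmc.Name)
			}
		}
		// NextWithContext follows NextLink; when no further link remains the
		// page comes back empty and NotDone ends the loop.
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```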
+
+// OpenShiftManagedClusterMasterPoolProfile contains configuration for OpenShift
+// master VMs.
+type OpenShiftManagedClusterMasterPoolProfile struct {
+ // Name - Unique name of the master pool profile in the context of the subscription and resource group.
+ Name *string `json:"name,omitempty"`
+ // Count - Number of masters (VMs) to host docker containers. The default value is 3.
+ Count *int32 `json:"count,omitempty"`
+ // VMSize - Size of agent VMs. Possible values include: 'StandardD2sV3', 'StandardD4sV3', 'StandardD8sV3', 'StandardD16sV3', 'StandardD32sV3', 'StandardD64sV3', 'StandardDS4V2', 'StandardDS5V2', 'StandardF8sV2', 'StandardF16sV2', 'StandardF32sV2', 'StandardF64sV2', 'StandardF72sV2', 'StandardF8s', 'StandardF16s', 'StandardE4sV3', 'StandardE8sV3', 'StandardE16sV3', 'StandardE20sV3', 'StandardE32sV3', 'StandardE64sV3', 'StandardGS2', 'StandardGS3', 'StandardGS4', 'StandardGS5', 'StandardDS12V2', 'StandardDS13V2', 'StandardDS14V2', 'StandardDS15V2', 'StandardL4s', 'StandardL8s', 'StandardL16s', 'StandardL32s'
+ VMSize OpenShiftContainerServiceVMSize `json:"vmSize,omitempty"`
+ // SubnetCidr - Subnet CIDR for the peering.
+ SubnetCidr *string `json:"subnetCidr,omitempty"`
+	// OsType - The operating system type for the VMs. Choose from Linux and Windows; defaults to Linux. Possible values include: 'Linux', 'Windows'
+ OsType OSType `json:"osType,omitempty"`
+}
+
+// OpenShiftManagedClusterProperties properties of the OpenShift managed cluster.
+type OpenShiftManagedClusterProperties struct {
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // OpenShiftVersion - Version of OpenShift specified when creating the cluster.
+ OpenShiftVersion *string `json:"openShiftVersion,omitempty"`
+ // ClusterVersion - READ-ONLY; Version of OpenShift specified when creating the cluster.
+ ClusterVersion *string `json:"clusterVersion,omitempty"`
+ // PublicHostname - READ-ONLY; Service generated FQDN for OpenShift API server.
+ PublicHostname *string `json:"publicHostname,omitempty"`
+ // Fqdn - READ-ONLY; Service generated FQDN for OpenShift API server loadbalancer internal hostname.
+ Fqdn *string `json:"fqdn,omitempty"`
+ // NetworkProfile - Configuration for OpenShift networking.
+ NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"`
+ // RouterProfiles - Configuration for OpenShift router(s).
+ RouterProfiles *[]OpenShiftRouterProfile `json:"routerProfiles,omitempty"`
+ // MasterPoolProfile - Configuration for OpenShift master VMs.
+ MasterPoolProfile *OpenShiftManagedClusterMasterPoolProfile `json:"masterPoolProfile,omitempty"`
+ // AgentPoolProfiles - Configuration of OpenShift cluster VMs.
+ AgentPoolProfiles *[]OpenShiftManagedClusterAgentPoolProfile `json:"agentPoolProfiles,omitempty"`
+ // AuthProfile - Configures OpenShift authentication.
+ AuthProfile *OpenShiftManagedClusterAuthProfile `json:"authProfile,omitempty"`
+}
+
+// OpenShiftManagedClustersCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of
+// a long-running operation.
+type OpenShiftManagedClustersCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *OpenShiftManagedClustersCreateOrUpdateFuture) Result(client OpenShiftManagedClustersClient) (osmc OpenShiftManagedCluster, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.OpenShiftManagedClustersCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if osmc.Response.Response, err = future.GetResult(sender); err == nil && osmc.Response.Response.StatusCode != http.StatusNoContent {
+ osmc, err = client.CreateOrUpdateResponder(osmc.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersCreateOrUpdateFuture", "Result", osmc.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// OpenShiftManagedClustersDeleteFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type OpenShiftManagedClustersDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *OpenShiftManagedClustersDeleteFuture) Result(client OpenShiftManagedClustersClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.OpenShiftManagedClustersDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// OpenShiftManagedClustersUpdateTagsFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type OpenShiftManagedClustersUpdateTagsFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *OpenShiftManagedClustersUpdateTagsFuture) Result(client OpenShiftManagedClustersClient) (osmc OpenShiftManagedCluster, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersUpdateTagsFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("containerservice.OpenShiftManagedClustersUpdateTagsFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if osmc.Response.Response, err = future.GetResult(sender); err == nil && osmc.Response.Response.StatusCode != http.StatusNoContent {
+ osmc, err = client.UpdateTagsResponder(osmc.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersUpdateTagsFuture", "Result", osmc.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
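All three futures share the same long-running-operation shape: DoneWithContext polls, and Result decodes the terminal payload once polling reports completion. Callers typically block with WaitForCompletionRef, inherited from azure.Future, before calling Result. A hedged sketch for the delete future follows; the client value and helper name are assumptions.

```go
package clusterops

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
)

// deleteAndWait starts the delete and blocks until the long-running operation
// finishes, then surfaces the terminal response through Result.
func deleteAndWait(ctx context.Context, client containerservice.OpenShiftManagedClustersClient, group, name string) error {
	future, err := client.Delete(ctx, group, name)
	if err != nil {
		return err
	}
	// WaitForCompletionRef polls the operation using the client's retry settings.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	// Result returns the final autorest.Response once polling reports done.
	_, err = future.Result(client)
	return err
}
```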
+
+// OpenShiftRouterProfile represents an OpenShift router
+type OpenShiftRouterProfile struct {
+ // Name - Name of the router profile.
+ Name *string `json:"name,omitempty"`
+ // PublicSubdomain - READ-ONLY; DNS subdomain for OpenShift router.
+ PublicSubdomain *string `json:"publicSubdomain,omitempty"`
+ // Fqdn - READ-ONLY; Auto-allocated FQDN for the OpenShift router.
+ Fqdn *string `json:"fqdn,omitempty"`
+}
+
+// OperationListResult the List Compute Operation operation response.
+type OperationListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; The list of compute operations
+ Value *[]OperationValue `json:"value,omitempty"`
+}
+
+// OperationValue describes the properties of a Compute Operation value.
+type OperationValue struct {
+ // Origin - READ-ONLY; The origin of the compute operation.
+ Origin *string `json:"origin,omitempty"`
+ // Name - READ-ONLY; The name of the compute operation.
+ Name *string `json:"name,omitempty"`
+ // OperationValueDisplay - Describes the properties of a Compute Operation Value Display.
+ *OperationValueDisplay `json:"display,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for OperationValue.
+func (ov OperationValue) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ov.OperationValueDisplay != nil {
+ objectMap["display"] = ov.OperationValueDisplay
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for OperationValue struct.
+func (ov *OperationValue) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "origin":
+ if v != nil {
+ var origin string
+ err = json.Unmarshal(*v, &origin)
+ if err != nil {
+ return err
+ }
+ ov.Origin = &origin
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ov.Name = &name
+ }
+ case "display":
+ if v != nil {
+ var operationValueDisplay OperationValueDisplay
+ err = json.Unmarshal(*v, &operationValueDisplay)
+ if err != nil {
+ return err
+ }
+ ov.OperationValueDisplay = &operationValueDisplay
+ }
+ }
+ }
+
+ return nil
+}
+
+// OperationValueDisplay describes the properties of a Compute Operation Value Display.
+type OperationValueDisplay struct {
+ // Operation - READ-ONLY; The display name of the compute operation.
+ Operation *string `json:"operation,omitempty"`
+ // Resource - READ-ONLY; The display name of the resource the operation applies to.
+ Resource *string `json:"resource,omitempty"`
+ // Description - READ-ONLY; The description of the operation.
+ Description *string `json:"description,omitempty"`
+ // Provider - READ-ONLY; The resource provider for the operation.
+ Provider *string `json:"provider,omitempty"`
+}
+
+// OrchestratorProfile contains information about an orchestrator.
+type OrchestratorProfile struct {
+ // OrchestratorType - Orchestrator type.
+ OrchestratorType *string `json:"orchestratorType,omitempty"`
+ // OrchestratorVersion - Orchestrator version (major, minor, patch).
+ OrchestratorVersion *string `json:"orchestratorVersion,omitempty"`
+ // IsPreview - Whether Kubernetes version is currently in preview.
+ IsPreview *bool `json:"isPreview,omitempty"`
+}
+
+// OrchestratorProfileType profile for the container service orchestrator.
+type OrchestratorProfileType struct {
+ // OrchestratorType - The orchestrator to use to manage container service cluster resources. Valid values are Kubernetes, Swarm, DCOS, DockerCE and Custom. Possible values include: 'Kubernetes', 'Swarm', 'DCOS', 'DockerCE', 'Custom'
+ OrchestratorType OrchestratorTypes `json:"orchestratorType,omitempty"`
+	// OrchestratorVersion - The version of the orchestrator to use. You can specify the major.minor.patch part of the actual version. For example, you can specify version as "1.6.11".
+ OrchestratorVersion *string `json:"orchestratorVersion,omitempty"`
+}
+
+// OrchestratorVersionProfile the profile of an orchestrator and its available versions.
+type OrchestratorVersionProfile struct {
+ // OrchestratorType - Orchestrator type.
+ OrchestratorType *string `json:"orchestratorType,omitempty"`
+ // OrchestratorVersion - Orchestrator version (major, minor, patch).
+ OrchestratorVersion *string `json:"orchestratorVersion,omitempty"`
+ // Default - Installed by default if version is not specified.
+ Default *bool `json:"default,omitempty"`
+ // IsPreview - Whether Kubernetes version is currently in preview.
+ IsPreview *bool `json:"isPreview,omitempty"`
+ // Upgrades - The list of available upgrade versions.
+ Upgrades *[]OrchestratorProfile `json:"upgrades,omitempty"`
+}
+
+// OrchestratorVersionProfileListResult the list of versions for supported orchestrators.
+type OrchestratorVersionProfileListResult struct {
+ autorest.Response `json:"-"`
+ // ID - READ-ONLY; Id of the orchestrator version profile list result.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Name of the orchestrator version profile list result.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Type of the orchestrator version profile list result.
+ Type *string `json:"type,omitempty"`
+ // OrchestratorVersionProfileProperties - The properties of an orchestrator version profile.
+ *OrchestratorVersionProfileProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for OrchestratorVersionProfileListResult.
+func (ovplr OrchestratorVersionProfileListResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ovplr.OrchestratorVersionProfileProperties != nil {
+ objectMap["properties"] = ovplr.OrchestratorVersionProfileProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for OrchestratorVersionProfileListResult struct.
+func (ovplr *OrchestratorVersionProfileListResult) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ ovplr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ovplr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ovplr.Type = &typeVar
+ }
+ case "properties":
+ if v != nil {
+ var orchestratorVersionProfileProperties OrchestratorVersionProfileProperties
+ err = json.Unmarshal(*v, &orchestratorVersionProfileProperties)
+ if err != nil {
+ return err
+ }
+ ovplr.OrchestratorVersionProfileProperties = &orchestratorVersionProfileProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// OrchestratorVersionProfileProperties the properties of an orchestrator version profile.
+type OrchestratorVersionProfileProperties struct {
+ // Orchestrators - List of orchestrator version profiles.
+ Orchestrators *[]OrchestratorVersionProfile `json:"orchestrators,omitempty"`
+}
+
+// Properties properties of the container service.
+type Properties struct {
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // OrchestratorProfile - Profile for the container service orchestrator.
+ OrchestratorProfile *OrchestratorProfileType `json:"orchestratorProfile,omitempty"`
+ // CustomProfile - Properties to configure a custom container service cluster.
+ CustomProfile *CustomProfile `json:"customProfile,omitempty"`
+	// ServicePrincipalProfile - Information about a service principal identity for the cluster to use for manipulating Azure APIs. Exactly one of secret or keyVaultSecretRef must be specified.
+ ServicePrincipalProfile *ServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"`
+ // MasterProfile - Profile for the container service master.
+ MasterProfile *MasterProfile `json:"masterProfile,omitempty"`
+ // AgentPoolProfiles - Properties of the agent pool.
+ AgentPoolProfiles *[]AgentPoolProfile `json:"agentPoolProfiles,omitempty"`
+ // WindowsProfile - Profile for Windows VMs in the container service cluster.
+ WindowsProfile *WindowsProfile `json:"windowsProfile,omitempty"`
+ // LinuxProfile - Profile for Linux VMs in the container service cluster.
+ LinuxProfile *LinuxProfile `json:"linuxProfile,omitempty"`
+ // DiagnosticsProfile - Profile for diagnostics in the container service cluster.
+ DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
+}
+
+// PurchasePlan used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
+type PurchasePlan struct {
+ // Name - The plan ID.
+ Name *string `json:"name,omitempty"`
+ // Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
+ Product *string `json:"product,omitempty"`
+ // PromotionCode - The promotion code.
+ PromotionCode *string `json:"promotionCode,omitempty"`
+	// Publisher - The publisher ID.
+ Publisher *string `json:"publisher,omitempty"`
+}
+
+// Resource the Resource model definition.
+type Resource struct {
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for Resource.
+func (r Resource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if r.Location != nil {
+ objectMap["location"] = r.Location
+ }
+ if r.Tags != nil {
+ objectMap["tags"] = r.Tags
+ }
+ return json.Marshal(objectMap)
+}
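The custom marshaler mirrors the READ-ONLY annotations: only Location and Tags are serialized, while ID, Name and Type remain server-populated. A small illustration under that assumption, with placeholder values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	r := containerservice.Resource{
		ID:       to.StringPtr("<read-only-resource-id>"), // READ-ONLY; dropped on marshal
		Location: to.StringPtr("eastus"),
		Tags:     map[string]*string{"env": to.StringPtr("dev")},
	}
	body, _ := json.Marshal(r)
	fmt.Println(string(body)) // {"location":"eastus","tags":{"env":"dev"}}
}
```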
+
+// ResourceReference a reference to an Azure resource.
+type ResourceReference struct {
+ // ID - The fully qualified Azure resource id.
+ ID *string `json:"id,omitempty"`
+}
+
+// ServicePrincipalProfile information about a service principal identity for the cluster to use for
+// manipulating Azure APIs. Either secret or keyVaultSecretRef must be specified.
+type ServicePrincipalProfile struct {
+ // ClientID - The ID for the service principal.
+ ClientID *string `json:"clientId,omitempty"`
+ // Secret - The secret password associated with the service principal in plain text.
+ Secret *string `json:"secret,omitempty"`
+ // KeyVaultSecretRef - Reference to a secret stored in Azure Key Vault.
+ KeyVaultSecretRef *KeyVaultSecretRef `json:"keyVaultSecretRef,omitempty"`
+}
+
+// SSHConfiguration SSH configuration for Linux-based VMs running on Azure.
+type SSHConfiguration struct {
+	// PublicKeys - The list of SSH public keys used to authenticate with Linux-based VMs. Only one key is expected to be specified.
+ PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"`
+}
+
+// SSHPublicKey contains information about SSH certificate public key data.
+type SSHPublicKey struct {
+ // KeyData - Certificate public key used to authenticate with VMs through SSH. The certificate must be in PEM format with or without headers.
+ KeyData *string `json:"keyData,omitempty"`
+}
+
+// SubResource reference to another subresource.
+type SubResource struct {
+ // ID - READ-ONLY; Resource ID.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource that is unique within a resource group. This name can be used to access the resource.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+}
+
+// TagsObject tags object for patch operations.
+type TagsObject struct {
+ // Tags - Resource tags.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for TagsObject.
+func (toVar TagsObject) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if toVar.Tags != nil {
+ objectMap["tags"] = toVar.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// VMDiagnostics profile for diagnostics on the container service VMs.
+type VMDiagnostics struct {
+ // Enabled - Whether the VM diagnostic agent is provisioned on the VM.
+ Enabled *bool `json:"enabled,omitempty"`
+ // StorageURI - READ-ONLY; The URI of the storage account where diagnostics are stored.
+ StorageURI *string `json:"storageUri,omitempty"`
+}
+
+// WindowsProfile profile for Windows VMs in the container service cluster.
+type WindowsProfile struct {
+ // AdminUsername - The administrator username to use for Windows VMs.
+ AdminUsername *string `json:"adminUsername,omitempty"`
+ // AdminPassword - The administrator password to use for Windows VMs.
+ AdminPassword *string `json:"adminPassword,omitempty"`
+}
diff --git a/services/containerservice/mgmt/2019-08-01/containerservice/openshiftmanagedclusters.go b/services/containerservice/mgmt/2019-08-01/containerservice/openshiftmanagedclusters.go
new file mode 100644
index 000000000000..ac574f3c4121
--- /dev/null
+++ b/services/containerservice/mgmt/2019-08-01/containerservice/openshiftmanagedclusters.go
@@ -0,0 +1,617 @@
+package containerservice
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// OpenShiftManagedClustersClient is the Container Service Client.
+type OpenShiftManagedClustersClient struct {
+ BaseClient
+}
+
+// NewOpenShiftManagedClustersClient creates an instance of the OpenShiftManagedClustersClient client.
+func NewOpenShiftManagedClustersClient(subscriptionID string) OpenShiftManagedClustersClient {
+ return NewOpenShiftManagedClustersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOpenShiftManagedClustersClientWithBaseURI creates an instance of the OpenShiftManagedClustersClient client.
+func NewOpenShiftManagedClustersClientWithBaseURI(baseURI string, subscriptionID string) OpenShiftManagedClustersClient {
+ return OpenShiftManagedClustersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates an OpenShift managed cluster with the specified configuration for agents and
+// OpenShift version.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the OpenShift managed cluster resource.
+// parameters - parameters supplied to the Create or Update an OpenShift Managed Cluster operation.
+func (client OpenShiftManagedClustersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters OpenShiftManagedCluster) (result OpenShiftManagedClustersCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.OpenShiftManagedClusterProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.OpenShiftManagedClusterProperties.OpenShiftVersion", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.OpenShiftManagedClusterProperties.MasterPoolProfile", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.OpenShiftManagedClusterProperties.MasterPoolProfile.Count", Name: validation.Null, Rule: true, Chain: nil}}},
+ }}}}}); err != nil {
+ return result, validation.NewError("containerservice.OpenShiftManagedClustersClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client OpenShiftManagedClustersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters OpenShiftManagedCluster) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-30"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/openShiftManagedClusters/{resourceName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client OpenShiftManagedClustersClient) CreateOrUpdateSender(req *http.Request) (future OpenShiftManagedClustersCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client OpenShiftManagedClustersClient) CreateOrUpdateResponder(resp *http.Response) (result OpenShiftManagedCluster, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
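Putting the pieces together, a create call builds parameters that satisfy the validation block above (OpenShiftVersion and MasterPoolProfile.Count are the client-side required fields), starts the long-running operation, and waits on the returned future. The sketch below is not authoritative: the resource group and cluster names are placeholders, the VM-size constant name is assumed from the enum listing in models.go, and OpenShiftManagedCluster is assumed to expose Location plus the embedded *OpenShiftManagedClusterProperties defined earlier in this diff.

```go
package clusterops

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/to"
)

// createCluster sketches the CreateOrUpdate call path: build minimal parameters,
// start the LRO, block until it completes, and return the resulting cluster.
func createCluster(ctx context.Context, client containerservice.OpenShiftManagedClustersClient) (containerservice.OpenShiftManagedCluster, error) {
	params := containerservice.OpenShiftManagedCluster{
		Location: to.StringPtr("eastus"),
		OpenShiftManagedClusterProperties: &containerservice.OpenShiftManagedClusterProperties{
			// OpenShiftVersion and MasterPoolProfile.Count are the fields the
			// client-side validator marks as required.
			OpenShiftVersion: to.StringPtr("v3.11"),
			MasterPoolProfile: &containerservice.OpenShiftManagedClusterMasterPoolProfile{
				Count:  to.Int32Ptr(3),
				VMSize: containerservice.StandardD4sV3,
			},
		},
	}

	future, err := client.CreateOrUpdate(ctx, "<resource-group>", "<cluster-name>", params)
	if err != nil {
		return containerservice.OpenShiftManagedCluster{}, err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return containerservice.OpenShiftManagedCluster{}, err
	}
	return future.Result(client)
}
```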
+
+// Delete deletes the OpenShift managed cluster with a specified resource group and name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the OpenShift managed cluster resource.
+func (client OpenShiftManagedClustersClient) Delete(ctx context.Context, resourceGroupName string, resourceName string) (result OpenShiftManagedClustersDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.OpenShiftManagedClustersClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client OpenShiftManagedClustersClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-30"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/openShiftManagedClusters/{resourceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client OpenShiftManagedClustersClient) DeleteSender(req *http.Request) (future OpenShiftManagedClustersDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client OpenShiftManagedClustersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the details of the managed OpenShift cluster with a specified resource group and name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the OpenShift managed cluster resource.
+func (client OpenShiftManagedClustersClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result OpenShiftManagedCluster, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.OpenShiftManagedClustersClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, resourceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client OpenShiftManagedClustersClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-30"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/openShiftManagedClusters/{resourceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client OpenShiftManagedClustersClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client OpenShiftManagedClustersClient) GetResponder(resp *http.Response) (result OpenShiftManagedCluster, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets a list of OpenShift managed clusters in the specified subscription. The operation returns properties of
+// each OpenShift managed cluster.
+func (client OpenShiftManagedClustersClient) List(ctx context.Context) (result OpenShiftManagedClusterListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.List")
+ defer func() {
+ sc := -1
+ if result.osmclr.Response.Response != nil {
+ sc = result.osmclr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.osmclr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.osmclr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client OpenShiftManagedClustersClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-30"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/openShiftManagedClusters", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client OpenShiftManagedClustersClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client OpenShiftManagedClustersClient) ListResponder(resp *http.Response) (result OpenShiftManagedClusterListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client OpenShiftManagedClustersClient) listNextResults(ctx context.Context, lastResults OpenShiftManagedClusterListResult) (result OpenShiftManagedClusterListResult, err error) {
+ req, err := lastResults.openShiftManagedClusterListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client OpenShiftManagedClustersClient) ListComplete(ctx context.Context) (result OpenShiftManagedClusterListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
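ListComplete returns the iterator defined in models.go, which hides page boundaries entirely. A typical consumption loop might look roughly like this; the client is assumed to be authorized and the helper name is hypothetical.

```go
package clusterops

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
)

// iterateClusters enumerates every cluster in the subscription one value at a
// time; NextWithContext transparently fetches further pages when needed.
func iterateClusters(ctx context.Context, client containerservice.OpenShiftManagedClustersClient) error {
	iter, err := client.ListComplete(ctx)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		osmc := iter.Value()
		if osmc.Name != nil {
			fmt.Println(*osmc.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```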
+
+// ListByResourceGroup lists OpenShift managed clusters in the specified subscription and resource group. The operation
+// returns properties of each OpenShift managed cluster.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+func (client OpenShiftManagedClustersClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result OpenShiftManagedClusterListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.osmclr.Response.Response != nil {
+ sc = result.osmclr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.OpenShiftManagedClustersClient", "ListByResourceGroup", err.Error())
+ }
+
+ result.fn = client.listByResourceGroupNextResults
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.osmclr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.osmclr, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client OpenShiftManagedClustersClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-30"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/openShiftManagedClusters", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client OpenShiftManagedClustersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client OpenShiftManagedClustersClient) ListByResourceGroupResponder(resp *http.Response) (result OpenShiftManagedClusterListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client OpenShiftManagedClustersClient) listByResourceGroupNextResults(ctx context.Context, lastResults OpenShiftManagedClusterListResult) (result OpenShiftManagedClusterListResult, err error) {
+ req, err := lastResults.openShiftManagedClusterListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client OpenShiftManagedClustersClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result OpenShiftManagedClusterListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
+ return
+}
+
+// UpdateTags updates an OpenShift managed cluster with the specified tags.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// resourceName - the name of the OpenShift managed cluster resource.
+// parameters - parameters supplied to the Update OpenShift Managed Cluster Tags operation.
+func (client OpenShiftManagedClustersClient) UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (result OpenShiftManagedClustersUpdateTagsFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OpenShiftManagedClustersClient.UpdateTags")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("containerservice.OpenShiftManagedClustersClient", "UpdateTags", err.Error())
+ }
+
+ req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, resourceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "UpdateTags", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.UpdateTagsSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OpenShiftManagedClustersClient", "UpdateTags", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// UpdateTagsPreparer prepares the UpdateTags request.
+func (client OpenShiftManagedClustersClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-30"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/openShiftManagedClusters/{resourceName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateTagsSender sends the UpdateTags request. The method will close the
+// http.Response Body if it receives an error.
+func (client OpenShiftManagedClustersClient) UpdateTagsSender(req *http.Request) (future OpenShiftManagedClustersUpdateTagsFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// UpdateTagsResponder handles the response to the UpdateTags request. The method always
+// closes the http.Response Body.
+func (client OpenShiftManagedClustersClient) UpdateTagsResponder(resp *http.Response) (result OpenShiftManagedCluster, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
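For context, UpdateTags is a long-running operation: the client returns an OpenShiftManagedClustersUpdateTagsFuture rather than the final cluster. A minimal caller sketch follows, assuming the package import path introduced in this release, environment-based credentials, and hypothetical subscription, resource-group, and cluster names; the WaitForCompletionRef/Result pattern mirrors the generated future type above.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Hypothetical identifiers; replace with real values.
	subscriptionID := "00000000-0000-0000-0000-000000000000"
	client := containerservice.NewOpenShiftManagedClustersClient(subscriptionID)

	// Assumes AZURE_* credentials are set in the environment.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()
	future, err := client.UpdateTags(ctx, "my-rg", "my-osa-cluster", containerservice.TagsObject{
		Tags: map[string]*string{"env": to.StringPtr("dev")},
	})
	if err != nil {
		panic(err)
	}

	// Block until the service reports the PATCH as complete, then fetch the result.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		panic(err)
	}
	cluster, err := future.Result(client)
	if err != nil {
		panic(err)
	}
	if cluster.Name != nil {
		fmt.Println("updated:", *cluster.Name)
	}
}
```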
diff --git a/services/containerservice/mgmt/2019-08-01/containerservice/operations.go b/services/containerservice/mgmt/2019-08-01/containerservice/operations.go
new file mode 100644
index 000000000000..a61a628971f6
--- /dev/null
+++ b/services/containerservice/mgmt/2019-08-01/containerservice/operations.go
@@ -0,0 +1,109 @@
+package containerservice
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// OperationsClient is the Container Service Client.
+type OperationsClient struct {
+ BaseClient
+}
+
+// NewOperationsClient creates an instance of the OperationsClient client.
+func NewOperationsClient(subscriptionID string) OperationsClient {
+ return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+ return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List gets a list of container service operations.
+func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OperationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerservice.OperationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerservice.OperationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ const APIVersion = "2019-08-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.ContainerService/operations"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
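A hedged usage sketch for the new OperationsClient: it lists the operations exposed by the Microsoft.ContainerService provider. The OperationListResult/OperationValue field names used below (Value, Name) are assumptions based on the generated models for this API version.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Hypothetical subscription ID; replace with a real one.
	client := containerservice.NewOperationsClient("00000000-0000-0000-0000-000000000000")

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	ops, err := client.List(context.Background())
	if err != nil {
		panic(err)
	}
	if ops.Value != nil {
		for _, op := range *ops.Value {
			if op.Name != nil {
				// e.g. Microsoft.ContainerService/managedClusters/read
				fmt.Println(*op.Name)
			}
		}
	}
}
```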
diff --git a/services/containerservice/mgmt/2019-08-01/containerservice/version.go b/services/containerservice/mgmt/2019-08-01/containerservice/version.go
new file mode 100644
index 000000000000..410a9d32aff0
--- /dev/null
+++ b/services/containerservice/mgmt/2019-08-01/containerservice/version.go
@@ -0,0 +1,30 @@
+package containerservice
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " containerservice/2019-08-01"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
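The two helpers above only compose strings. As a quick sketch of how a caller might surface them (assuming the import path added in this release):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice"
)

func main() {
	// Prints something like "Azure-SDK-For-Go/v33.0.0 containerservice/2019-08-01".
	fmt.Println(containerservice.UserAgent())
	fmt.Println(containerservice.Version())
}
```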
diff --git a/services/datafactory/mgmt/2018-06-01/datafactory/datafactoryapi/interfaces.go b/services/datafactory/mgmt/2018-06-01/datafactory/datafactoryapi/interfaces.go
index bde967eebfef..547619423f12 100644
--- a/services/datafactory/mgmt/2018-06-01/datafactory/datafactoryapi/interfaces.go
+++ b/services/datafactory/mgmt/2018-06-01/datafactory/datafactoryapi/interfaces.go
@@ -145,13 +145,24 @@ type TriggersClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, factoryName string, triggerName string, trigger datafactory.TriggerResource, ifMatch string) (result datafactory.TriggerResource, err error)
Delete(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (result autorest.Response, err error)
Get(ctx context.Context, resourceGroupName string, factoryName string, triggerName string, ifNoneMatch string) (result datafactory.TriggerResource, err error)
+ GetEventSubscriptionStatus(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (result datafactory.TriggerSubscriptionOperationStatus, err error)
ListByFactory(ctx context.Context, resourceGroupName string, factoryName string) (result datafactory.TriggerListResponsePage, err error)
Start(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (result datafactory.TriggersStartFuture, err error)
Stop(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (result datafactory.TriggersStopFuture, err error)
+ SubscribeToEvents(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (result datafactory.TriggersSubscribeToEventsFuture, err error)
+ UnsubscribeFromEvents(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (result datafactory.TriggersUnsubscribeFromEventsFuture, err error)
}
var _ TriggersClientAPI = (*datafactory.TriggersClient)(nil)
+// TriggerRunsClientAPI contains the set of methods on the TriggerRunsClient type.
+type TriggerRunsClientAPI interface {
+ QueryByFactory(ctx context.Context, resourceGroupName string, factoryName string, filterParameters datafactory.RunFilterParameters) (result datafactory.TriggerRunsQueryResponse, err error)
+ Rerun(ctx context.Context, resourceGroupName string, factoryName string, triggerName string, runID string) (result autorest.Response, err error)
+}
+
+var _ TriggerRunsClientAPI = (*datafactory.TriggerRunsClient)(nil)
+
// RerunTriggersClientAPI contains the set of methods on the RerunTriggersClient type.
type RerunTriggersClientAPI interface {
Cancel(ctx context.Context, resourceGroupName string, factoryName string, triggerName string, rerunTriggerName string) (result datafactory.RerunTriggersCancelFuture, err error)
@@ -162,10 +173,3 @@ type RerunTriggersClientAPI interface {
}
var _ RerunTriggersClientAPI = (*datafactory.RerunTriggersClient)(nil)
-
-// TriggerRunsClientAPI contains the set of methods on the TriggerRunsClient type.
-type TriggerRunsClientAPI interface {
- QueryByFactory(ctx context.Context, resourceGroupName string, factoryName string, filterParameters datafactory.RunFilterParameters) (result datafactory.TriggerRunsQueryResponse, err error)
-}
-
-var _ TriggerRunsClientAPI = (*datafactory.TriggerRunsClient)(nil)
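The *ClientAPI interfaces in this file exist so consumers can substitute fakes for the generated clients in tests. A minimal sketch of a fake satisfying the relocated TriggerRunsClientAPI (the zero-value results are placeholders; the compile-time assertion mirrors the one the generated file itself uses):

```go
package fakes

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory/datafactoryapi"
	"github.com/Azure/go-autorest/autorest"
)

// fakeTriggerRuns records calls and returns canned responses for tests.
type fakeTriggerRuns struct {
	rerunCalls int
}

func (f *fakeTriggerRuns) QueryByFactory(ctx context.Context, resourceGroupName, factoryName string, filterParameters datafactory.RunFilterParameters) (datafactory.TriggerRunsQueryResponse, error) {
	return datafactory.TriggerRunsQueryResponse{}, nil
}

func (f *fakeTriggerRuns) Rerun(ctx context.Context, resourceGroupName, factoryName, triggerName, runID string) (autorest.Response, error) {
	f.rerunCalls++
	return autorest.Response{}, nil
}

// Compile-time check that the fake implements the interface added above.
var _ datafactoryapi.TriggerRunsClientAPI = (*fakeTriggerRuns)(nil)
```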
diff --git a/services/datafactory/mgmt/2018-06-01/datafactory/models.go b/services/datafactory/mgmt/2018-06-01/datafactory/models.go
index 59b25301eaed..dd3c922d71a8 100644
--- a/services/datafactory/mgmt/2018-06-01/datafactory/models.go
+++ b/services/datafactory/mgmt/2018-06-01/datafactory/models.go
@@ -68,6 +68,27 @@ func PossibleAuthorizationTypeValues() []AuthorizationType {
return []AuthorizationType{AuthorizationTypeKey, AuthorizationTypeLinkedIntegrationRuntimeType, AuthorizationTypeRBAC}
}
+// AvroCompressionCodec enumerates the values for avro compression codec.
+type AvroCompressionCodec string
+
+const (
+ // Bzip2 ...
+ Bzip2 AvroCompressionCodec = "bzip2"
+ // Deflate ...
+ Deflate AvroCompressionCodec = "deflate"
+ // None ...
+ None AvroCompressionCodec = "none"
+ // Snappy ...
+ Snappy AvroCompressionCodec = "snappy"
+ // Xz ...
+ Xz AvroCompressionCodec = "xz"
+)
+
+// PossibleAvroCompressionCodecValues returns an array of possible values for the AvroCompressionCodec const type.
+func PossibleAvroCompressionCodecValues() []AvroCompressionCodec {
+ return []AvroCompressionCodec{Bzip2, Deflate, None, Snappy, Xz}
+}
+
// AzureFunctionActivityMethod enumerates the values for azure function activity method.
type AzureFunctionActivityMethod string
@@ -266,6 +287,27 @@ func PossibleDynamicsDeploymentTypeValues() []DynamicsDeploymentType {
return []DynamicsDeploymentType{Online, OnPremisesWithIfd}
}
+// EventSubscriptionStatus enumerates the values for event subscription status.
+type EventSubscriptionStatus string
+
+const (
+ // Deprovisioning ...
+ Deprovisioning EventSubscriptionStatus = "Deprovisioning"
+ // Disabled ...
+ Disabled EventSubscriptionStatus = "Disabled"
+ // Enabled ...
+ Enabled EventSubscriptionStatus = "Enabled"
+ // Provisioning ...
+ Provisioning EventSubscriptionStatus = "Provisioning"
+ // Unknown ...
+ Unknown EventSubscriptionStatus = "Unknown"
+)
+
+// PossibleEventSubscriptionStatusValues returns an array of possible values for the EventSubscriptionStatus const type.
+func PossibleEventSubscriptionStatusValues() []EventSubscriptionStatus {
+ return []EventSubscriptionStatus{Deprovisioning, Disabled, Enabled, Provisioning, Unknown}
+}
+
// FtpAuthenticationType enumerates the values for ftp authentication type.
type FtpAuthenticationType string
@@ -330,17 +372,17 @@ func PossibleHBaseAuthenticationTypeValues() []HBaseAuthenticationType {
type HDInsightActivityDebugInfoOption string
const (
- // Always ...
- Always HDInsightActivityDebugInfoOption = "Always"
- // Failure ...
- Failure HDInsightActivityDebugInfoOption = "Failure"
- // None ...
- None HDInsightActivityDebugInfoOption = "None"
+ // HDInsightActivityDebugInfoOptionAlways ...
+ HDInsightActivityDebugInfoOptionAlways HDInsightActivityDebugInfoOption = "Always"
+ // HDInsightActivityDebugInfoOptionFailure ...
+ HDInsightActivityDebugInfoOptionFailure HDInsightActivityDebugInfoOption = "Failure"
+ // HDInsightActivityDebugInfoOptionNone ...
+ HDInsightActivityDebugInfoOptionNone HDInsightActivityDebugInfoOption = "None"
)
// PossibleHDInsightActivityDebugInfoOptionValues returns an array of possible values for the HDInsightActivityDebugInfoOption const type.
func PossibleHDInsightActivityDebugInfoOptionValues() []HDInsightActivityDebugInfoOption {
- return []HDInsightActivityDebugInfoOption{Always, Failure, None}
+ return []HDInsightActivityDebugInfoOption{HDInsightActivityDebugInfoOptionAlways, HDInsightActivityDebugInfoOptionFailure, HDInsightActivityDebugInfoOptionNone}
}
// HiveAuthenticationType enumerates the values for hive authentication type.
@@ -610,6 +652,21 @@ func PossibleIntegrationRuntimeUpdateResultValues() []IntegrationRuntimeUpdateRe
return []IntegrationRuntimeUpdateResult{IntegrationRuntimeUpdateResultFail, IntegrationRuntimeUpdateResultNone, IntegrationRuntimeUpdateResultSucceed}
}
+// JSONWriteFilePattern enumerates the values for json write file pattern.
+type JSONWriteFilePattern string
+
+const (
+ // ArrayOfObjects ...
+ ArrayOfObjects JSONWriteFilePattern = "arrayOfObjects"
+ // SetOfObjects ...
+ SetOfObjects JSONWriteFilePattern = "setOfObjects"
+)
+
+// PossibleJSONWriteFilePatternValues returns an array of possible values for the JSONWriteFilePattern const type.
+func PossibleJSONWriteFilePatternValues() []JSONWriteFilePattern {
+ return []JSONWriteFilePattern{ArrayOfObjects, SetOfObjects}
+}
+
// ManagedIntegrationRuntimeNodeStatus enumerates the values for managed integration runtime node status.
type ManagedIntegrationRuntimeNodeStatus string
@@ -1248,17 +1305,17 @@ func PossibleTriggerRunStatusValues() []TriggerRunStatus {
type TriggerRuntimeState string
const (
- // Disabled ...
- Disabled TriggerRuntimeState = "Disabled"
- // Started ...
- Started TriggerRuntimeState = "Started"
- // Stopped ...
- Stopped TriggerRuntimeState = "Stopped"
+ // TriggerRuntimeStateDisabled ...
+ TriggerRuntimeStateDisabled TriggerRuntimeState = "Disabled"
+ // TriggerRuntimeStateStarted ...
+ TriggerRuntimeStateStarted TriggerRuntimeState = "Started"
+ // TriggerRuntimeStateStopped ...
+ TriggerRuntimeStateStopped TriggerRuntimeState = "Stopped"
)
// PossibleTriggerRuntimeStateValues returns an array of possible values for the TriggerRuntimeState const type.
func PossibleTriggerRuntimeStateValues() []TriggerRuntimeState {
- return []TriggerRuntimeState{Disabled, Started, Stopped}
+ return []TriggerRuntimeState{TriggerRuntimeStateDisabled, TriggerRuntimeStateStarted, TriggerRuntimeStateStopped}
}
// TumblingWindowFrequency enumerates the values for tumbling window frequency.
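The constant renames in the two hunks above (bare Always/Failure/None and Disabled/Started/Stopped gaining their enum type as a prefix) are the breaking part of this datafactory change. A hedged migration sketch showing the post-v33 spellings:

```go
package migration

import "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"

// debugInfoAndState returns the post-v33 spellings of two constants that were
// previously exported in this package as bare Always and Stopped.
func debugInfoAndState() (datafactory.HDInsightActivityDebugInfoOption, datafactory.TriggerRuntimeState) {
	return datafactory.HDInsightActivityDebugInfoOptionAlways, datafactory.TriggerRuntimeStateStopped
}
```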
@@ -1376,12 +1433,16 @@ func PossibleTypeBasicActivityValues() []TypeBasicActivity {
type TypeBasicCopySink string
const (
+ // TypeAvroSink ...
+ TypeAvroSink TypeBasicCopySink = "AvroSink"
// TypeAzureBlobFSSink ...
TypeAzureBlobFSSink TypeBasicCopySink = "AzureBlobFSSink"
// TypeAzureDataExplorerSink ...
TypeAzureDataExplorerSink TypeBasicCopySink = "AzureDataExplorerSink"
// TypeAzureDataLakeStoreSink ...
TypeAzureDataLakeStoreSink TypeBasicCopySink = "AzureDataLakeStoreSink"
+ // TypeAzureMySQLSink ...
+ TypeAzureMySQLSink TypeBasicCopySink = "AzureMySqlSink"
// TypeAzurePostgreSQLSink ...
TypeAzurePostgreSQLSink TypeBasicCopySink = "AzurePostgreSqlSink"
// TypeAzureQueueSink ...
@@ -1414,6 +1475,8 @@ const (
TypeFileSystemSink TypeBasicCopySink = "FileSystemSink"
// TypeInformixSink ...
TypeInformixSink TypeBasicCopySink = "InformixSink"
+ // TypeJSONSink ...
+ TypeJSONSink TypeBasicCopySink = "JsonSink"
// TypeMicrosoftAccessSink ...
TypeMicrosoftAccessSink TypeBasicCopySink = "MicrosoftAccessSink"
// TypeOdbcSink ...
@@ -1440,7 +1503,7 @@ const (
// PossibleTypeBasicCopySinkValues returns an array of possible values for the TypeBasicCopySink const type.
func PossibleTypeBasicCopySinkValues() []TypeBasicCopySink {
- return []TypeBasicCopySink{TypeAzureBlobFSSink, TypeAzureDataExplorerSink, TypeAzureDataLakeStoreSink, TypeAzurePostgreSQLSink, TypeAzureQueueSink, TypeAzureSearchIndexSink, TypeAzureSQLSink, TypeAzureTableSink, TypeBinarySink, TypeBlobSink, TypeCommonDataServiceForAppsSink, TypeCopySink, TypeCosmosDbMongoDbAPISink, TypeDelimitedTextSink, TypeDocumentDbCollectionSink, TypeDynamicsCrmSink, TypeDynamicsSink, TypeFileSystemSink, TypeInformixSink, TypeMicrosoftAccessSink, TypeOdbcSink, TypeOracleSink, TypeParquetSink, TypeSalesforceServiceCloudSink, TypeSalesforceSink, TypeSapCloudForCustomerSink, TypeSQLDWSink, TypeSQLMISink, TypeSQLServerSink, TypeSQLSink}
+ return []TypeBasicCopySink{TypeAvroSink, TypeAzureBlobFSSink, TypeAzureDataExplorerSink, TypeAzureDataLakeStoreSink, TypeAzureMySQLSink, TypeAzurePostgreSQLSink, TypeAzureQueueSink, TypeAzureSearchIndexSink, TypeAzureSQLSink, TypeAzureTableSink, TypeBinarySink, TypeBlobSink, TypeCommonDataServiceForAppsSink, TypeCopySink, TypeCosmosDbMongoDbAPISink, TypeDelimitedTextSink, TypeDocumentDbCollectionSink, TypeDynamicsCrmSink, TypeDynamicsSink, TypeFileSystemSink, TypeInformixSink, TypeJSONSink, TypeMicrosoftAccessSink, TypeOdbcSink, TypeOracleSink, TypeParquetSink, TypeSalesforceServiceCloudSink, TypeSalesforceSink, TypeSapCloudForCustomerSink, TypeSQLDWSink, TypeSQLMISink, TypeSQLServerSink, TypeSQLSink}
}
// TypeBasicCopySource enumerates the values for type basic copy source.
@@ -1451,12 +1514,16 @@ const (
TypeAmazonMWSSource TypeBasicCopySource = "AmazonMWSSource"
// TypeAmazonRedshiftSource ...
TypeAmazonRedshiftSource TypeBasicCopySource = "AmazonRedshiftSource"
+ // TypeAvroSource ...
+ TypeAvroSource TypeBasicCopySource = "AvroSource"
// TypeAzureBlobFSSource ...
TypeAzureBlobFSSource TypeBasicCopySource = "AzureBlobFSSource"
// TypeAzureDataExplorerSource ...
TypeAzureDataExplorerSource TypeBasicCopySource = "AzureDataExplorerSource"
// TypeAzureDataLakeStoreSource ...
TypeAzureDataLakeStoreSource TypeBasicCopySource = "AzureDataLakeStoreSource"
+ // TypeAzureMariaDBSource ...
+ TypeAzureMariaDBSource TypeBasicCopySource = "AzureMariaDBSource"
// TypeAzureMySQLSource ...
TypeAzureMySQLSource TypeBasicCopySource = "AzureMySqlSource"
// TypeAzurePostgreSQLSource ...
@@ -1521,6 +1588,8 @@ const (
TypeInformixSource TypeBasicCopySource = "InformixSource"
// TypeJiraSource ...
TypeJiraSource TypeBasicCopySource = "JiraSource"
+ // TypeJSONSource ...
+ TypeJSONSource TypeBasicCopySource = "JsonSource"
// TypeMagentoSource ...
TypeMagentoSource TypeBasicCopySource = "MagentoSource"
// TypeMariaDBSource ...
@@ -1571,6 +1640,8 @@ const (
TypeSalesforceServiceCloudSource TypeBasicCopySource = "SalesforceServiceCloudSource"
// TypeSalesforceSource ...
TypeSalesforceSource TypeBasicCopySource = "SalesforceSource"
+ // TypeSapBwSource ...
+ TypeSapBwSource TypeBasicCopySource = "SapBwSource"
// TypeSapCloudForCustomerSource ...
TypeSapCloudForCustomerSource TypeBasicCopySource = "SapCloudForCustomerSource"
// TypeSapEccSource ...
@@ -1613,7 +1684,7 @@ const (
// PossibleTypeBasicCopySourceValues returns an array of possible values for the TypeBasicCopySource const type.
func PossibleTypeBasicCopySourceValues() []TypeBasicCopySource {
- return []TypeBasicCopySource{TypeAmazonMWSSource, TypeAmazonRedshiftSource, TypeAzureBlobFSSource, TypeAzureDataExplorerSource, TypeAzureDataLakeStoreSource, TypeAzureMySQLSource, TypeAzurePostgreSQLSource, TypeAzureSQLSource, TypeAzureTableSource, TypeBinarySource, TypeBlobSource, TypeCassandraSource, TypeCommonDataServiceForAppsSource, TypeConcurSource, TypeCopySource, TypeCosmosDbMongoDbAPISource, TypeCouchbaseSource, TypeDb2Source, TypeDelimitedTextSource, TypeDocumentDbCollectionSource, TypeDrillSource, TypeDynamicsAXSource, TypeDynamicsCrmSource, TypeDynamicsSource, TypeEloquaSource, TypeFileSystemSource, TypeGoogleAdWordsSource, TypeGoogleBigQuerySource, TypeGreenplumSource, TypeHBaseSource, TypeHdfsSource, TypeHiveSource, TypeHTTPSource, TypeHubspotSource, TypeImpalaSource, TypeInformixSource, TypeJiraSource, TypeMagentoSource, TypeMariaDBSource, TypeMarketoSource, TypeMicrosoftAccessSource, TypeMongoDbSource, TypeMongoDbV2Source, TypeMySQLSource, TypeNetezzaSource, TypeODataSource, TypeOdbcSource, TypeOffice365Source, TypeOracleServiceCloudSource, TypeOracleSource, TypeParquetSource, TypePaypalSource, TypePhoenixSource, TypePostgreSQLSource, TypePrestoSource, TypeQuickBooksSource, TypeRelationalSource, TypeResponsysSource, TypeRestSource, TypeSalesforceMarketingCloudSource, TypeSalesforceServiceCloudSource, TypeSalesforceSource, TypeSapCloudForCustomerSource, TypeSapEccSource, TypeSapHanaSource, TypeSapOpenHubSource, TypeSapTableSource, TypeServiceNowSource, TypeShopifySource, TypeSparkSource, TypeSQLDWSource, TypeSQLMISource, TypeSQLServerSource, TypeSQLSource, TypeSquareSource, TypeSybaseSource, TypeTeradataSource, TypeVerticaSource, TypeWebSource, TypeXeroSource, TypeZohoSource}
+ return []TypeBasicCopySource{TypeAmazonMWSSource, TypeAmazonRedshiftSource, TypeAvroSource, TypeAzureBlobFSSource, TypeAzureDataExplorerSource, TypeAzureDataLakeStoreSource, TypeAzureMariaDBSource, TypeAzureMySQLSource, TypeAzurePostgreSQLSource, TypeAzureSQLSource, TypeAzureTableSource, TypeBinarySource, TypeBlobSource, TypeCassandraSource, TypeCommonDataServiceForAppsSource, TypeConcurSource, TypeCopySource, TypeCosmosDbMongoDbAPISource, TypeCouchbaseSource, TypeDb2Source, TypeDelimitedTextSource, TypeDocumentDbCollectionSource, TypeDrillSource, TypeDynamicsAXSource, TypeDynamicsCrmSource, TypeDynamicsSource, TypeEloquaSource, TypeFileSystemSource, TypeGoogleAdWordsSource, TypeGoogleBigQuerySource, TypeGreenplumSource, TypeHBaseSource, TypeHdfsSource, TypeHiveSource, TypeHTTPSource, TypeHubspotSource, TypeImpalaSource, TypeInformixSource, TypeJiraSource, TypeJSONSource, TypeMagentoSource, TypeMariaDBSource, TypeMarketoSource, TypeMicrosoftAccessSource, TypeMongoDbSource, TypeMongoDbV2Source, TypeMySQLSource, TypeNetezzaSource, TypeODataSource, TypeOdbcSource, TypeOffice365Source, TypeOracleServiceCloudSource, TypeOracleSource, TypeParquetSource, TypePaypalSource, TypePhoenixSource, TypePostgreSQLSource, TypePrestoSource, TypeQuickBooksSource, TypeRelationalSource, TypeResponsysSource, TypeRestSource, TypeSalesforceMarketingCloudSource, TypeSalesforceServiceCloudSource, TypeSalesforceSource, TypeSapBwSource, TypeSapCloudForCustomerSource, TypeSapEccSource, TypeSapHanaSource, TypeSapOpenHubSource, TypeSapTableSource, TypeServiceNowSource, TypeShopifySource, TypeSparkSource, TypeSQLDWSource, TypeSQLMISource, TypeSQLServerSource, TypeSQLSource, TypeSquareSource, TypeSybaseSource, TypeTeradataSource, TypeVerticaSource, TypeWebSource, TypeXeroSource, TypeZohoSource}
}
// TypeBasicDataset enumerates the values for type basic dataset.
@@ -1622,8 +1693,12 @@ type TypeBasicDataset string
const (
// TypeAmazonMWSObject ...
TypeAmazonMWSObject TypeBasicDataset = "AmazonMWSObject"
+ // TypeAmazonRedshiftTable ...
+ TypeAmazonRedshiftTable TypeBasicDataset = "AmazonRedshiftTable"
// TypeAmazonS3Object ...
TypeAmazonS3Object TypeBasicDataset = "AmazonS3Object"
+ // TypeAvro ...
+ TypeAvro TypeBasicDataset = "Avro"
// TypeAzureBlob ...
TypeAzureBlob TypeBasicDataset = "AzureBlob"
// TypeAzureBlobFSFile ...
@@ -1632,6 +1707,8 @@ const (
TypeAzureDataExplorerTable TypeBasicDataset = "AzureDataExplorerTable"
// TypeAzureDataLakeStoreFile ...
TypeAzureDataLakeStoreFile TypeBasicDataset = "AzureDataLakeStoreFile"
+ // TypeAzureMariaDBTable ...
+ TypeAzureMariaDBTable TypeBasicDataset = "AzureMariaDBTable"
// TypeAzureMySQLTable ...
TypeAzureMySQLTable TypeBasicDataset = "AzureMySqlTable"
// TypeAzurePostgreSQLTable ...
@@ -1662,6 +1739,8 @@ const (
TypeCustomDataset TypeBasicDataset = "CustomDataset"
// TypeDataset ...
TypeDataset TypeBasicDataset = "Dataset"
+ // TypeDb2Table ...
+ TypeDb2Table TypeBasicDataset = "Db2Table"
// TypeDelimitedText ...
TypeDelimitedText TypeBasicDataset = "DelimitedText"
// TypeDocumentDbCollection ...
@@ -1698,6 +1777,8 @@ const (
TypeInformixTable TypeBasicDataset = "InformixTable"
// TypeJiraObject ...
TypeJiraObject TypeBasicDataset = "JiraObject"
+ // TypeJSON ...
+ TypeJSON TypeBasicDataset = "Json"
// TypeMagentoObject ...
TypeMagentoObject TypeBasicDataset = "MagentoObject"
// TypeMariaDBTable ...
@@ -1748,6 +1829,8 @@ const (
TypeSalesforceObject TypeBasicDataset = "SalesforceObject"
// TypeSalesforceServiceCloudObject ...
TypeSalesforceServiceCloudObject TypeBasicDataset = "SalesforceServiceCloudObject"
+ // TypeSapBwCube ...
+ TypeSapBwCube TypeBasicDataset = "SapBwCube"
// TypeSapCloudForCustomerResource ...
TypeSapCloudForCustomerResource TypeBasicDataset = "SapCloudForCustomerResource"
// TypeSapEccResource ...
@@ -1768,6 +1851,8 @@ const (
TypeSQLServerTable TypeBasicDataset = "SqlServerTable"
// TypeSquareObject ...
TypeSquareObject TypeBasicDataset = "SquareObject"
+ // TypeSybaseTable ...
+ TypeSybaseTable TypeBasicDataset = "SybaseTable"
// TypeTeradataTable ...
TypeTeradataTable TypeBasicDataset = "TeradataTable"
// TypeVerticaTable ...
@@ -1782,7 +1867,7 @@ const (
// PossibleTypeBasicDatasetValues returns an array of possible values for the TypeBasicDataset const type.
func PossibleTypeBasicDatasetValues() []TypeBasicDataset {
- return []TypeBasicDataset{TypeAmazonMWSObject, TypeAmazonS3Object, TypeAzureBlob, TypeAzureBlobFSFile, TypeAzureDataExplorerTable, TypeAzureDataLakeStoreFile, TypeAzureMySQLTable, TypeAzurePostgreSQLTable, TypeAzureSearchIndex, TypeAzureSQLDWTable, TypeAzureSQLMITable, TypeAzureSQLTable, TypeAzureTable, TypeBinary, TypeCassandraTable, TypeCommonDataServiceForAppsEntity, TypeConcurObject, TypeCosmosDbMongoDbAPICollection, TypeCouchbaseTable, TypeCustomDataset, TypeDataset, TypeDelimitedText, TypeDocumentDbCollection, TypeDrillTable, TypeDynamicsAXResource, TypeDynamicsCrmEntity, TypeDynamicsEntity, TypeEloquaObject, TypeFileShare, TypeGoogleAdWordsObject, TypeGoogleBigQueryObject, TypeGreenplumTable, TypeHBaseObject, TypeHiveObject, TypeHTTPFile, TypeHubspotObject, TypeImpalaObject, TypeInformixTable, TypeJiraObject, TypeMagentoObject, TypeMariaDBTable, TypeMarketoObject, TypeMicrosoftAccessTable, TypeMongoDbCollection, TypeMongoDbV2Collection, TypeMySQLTable, TypeNetezzaTable, TypeODataResource, TypeOdbcTable, TypeOffice365Table, TypeOracleServiceCloudObject, TypeOracleTable, TypeParquet, TypePaypalObject, TypePhoenixObject, TypePostgreSQLTable, TypePrestoObject, TypeQuickBooksObject, TypeRelationalTable, TypeResponsysObject, TypeRestResource, TypeSalesforceMarketingCloudObject, TypeSalesforceObject, TypeSalesforceServiceCloudObject, TypeSapCloudForCustomerResource, TypeSapEccResource, TypeSapHanaTable, TypeSapOpenHubTable, TypeSapTableResource, TypeServiceNowObject, TypeShopifyObject, TypeSparkObject, TypeSQLServerTable, TypeSquareObject, TypeTeradataTable, TypeVerticaTable, TypeWebTable, TypeXeroObject, TypeZohoObject}
+ return []TypeBasicDataset{TypeAmazonMWSObject, TypeAmazonRedshiftTable, TypeAmazonS3Object, TypeAvro, TypeAzureBlob, TypeAzureBlobFSFile, TypeAzureDataExplorerTable, TypeAzureDataLakeStoreFile, TypeAzureMariaDBTable, TypeAzureMySQLTable, TypeAzurePostgreSQLTable, TypeAzureSearchIndex, TypeAzureSQLDWTable, TypeAzureSQLMITable, TypeAzureSQLTable, TypeAzureTable, TypeBinary, TypeCassandraTable, TypeCommonDataServiceForAppsEntity, TypeConcurObject, TypeCosmosDbMongoDbAPICollection, TypeCouchbaseTable, TypeCustomDataset, TypeDataset, TypeDb2Table, TypeDelimitedText, TypeDocumentDbCollection, TypeDrillTable, TypeDynamicsAXResource, TypeDynamicsCrmEntity, TypeDynamicsEntity, TypeEloquaObject, TypeFileShare, TypeGoogleAdWordsObject, TypeGoogleBigQueryObject, TypeGreenplumTable, TypeHBaseObject, TypeHiveObject, TypeHTTPFile, TypeHubspotObject, TypeImpalaObject, TypeInformixTable, TypeJiraObject, TypeJSON, TypeMagentoObject, TypeMariaDBTable, TypeMarketoObject, TypeMicrosoftAccessTable, TypeMongoDbCollection, TypeMongoDbV2Collection, TypeMySQLTable, TypeNetezzaTable, TypeODataResource, TypeOdbcTable, TypeOffice365Table, TypeOracleServiceCloudObject, TypeOracleTable, TypeParquet, TypePaypalObject, TypePhoenixObject, TypePostgreSQLTable, TypePrestoObject, TypeQuickBooksObject, TypeRelationalTable, TypeResponsysObject, TypeRestResource, TypeSalesforceMarketingCloudObject, TypeSalesforceObject, TypeSalesforceServiceCloudObject, TypeSapBwCube, TypeSapCloudForCustomerResource, TypeSapEccResource, TypeSapHanaTable, TypeSapOpenHubTable, TypeSapTableResource, TypeServiceNowObject, TypeShopifyObject, TypeSparkObject, TypeSQLServerTable, TypeSquareObject, TypeSybaseTable, TypeTeradataTable, TypeVerticaTable, TypeWebTable, TypeXeroObject, TypeZohoObject}
}
// TypeBasicDatasetCompression enumerates the values for type basic dataset compression.
@@ -1927,6 +2012,8 @@ const (
TypeAzureFunction TypeBasicLinkedService = "AzureFunction"
// TypeAzureKeyVault ...
TypeAzureKeyVault TypeBasicLinkedService = "AzureKeyVault"
+ // TypeAzureMariaDB ...
+ TypeAzureMariaDB TypeBasicLinkedService = "AzureMariaDB"
// TypeAzureML ...
TypeAzureML TypeBasicLinkedService = "AzureML"
// TypeAzureMySQL ...
@@ -2089,7 +2176,7 @@ const (
// PossibleTypeBasicLinkedServiceValues returns an array of possible values for the TypeBasicLinkedService const type.
func PossibleTypeBasicLinkedServiceValues() []TypeBasicLinkedService {
- return []TypeBasicLinkedService{TypeAmazonMWS, TypeAmazonRedshift, TypeAmazonS3, TypeAzureBatch, TypeAzureBlobFS, TypeAzureBlobStorage, TypeAzureDatabricks, TypeAzureDataExplorer, TypeAzureDataLakeAnalytics, TypeAzureDataLakeStore, TypeAzureFunction, TypeAzureKeyVault, TypeAzureML, TypeAzureMySQL, TypeAzurePostgreSQL, TypeAzureSearch, TypeAzureSQLDatabase, TypeAzureSQLDW, TypeAzureSQLMI, TypeAzureStorage, TypeAzureTableStorage, TypeCassandra, TypeCommonDataServiceForApps, TypeConcur, TypeCosmosDb, TypeCosmosDbMongoDbAPI, TypeCouchbase, TypeCustomDataSource, TypeDb2, TypeDrill, TypeDynamics, TypeDynamicsAX, TypeDynamicsCrm, TypeEloqua, TypeFileServer, TypeFtpServer, TypeGoogleAdWords, TypeGoogleBigQuery, TypeGreenplum, TypeHBase, TypeHdfs, TypeHDInsight, TypeHDInsightOnDemand, TypeHive, TypeHTTPServer, TypeHubspot, TypeImpala, TypeInformix, TypeJira, TypeLinkedService, TypeMagento, TypeMariaDB, TypeMarketo, TypeMicrosoftAccess, TypeMongoDb, TypeMongoDbV2, TypeMySQL, TypeNetezza, TypeOData, TypeOdbc, TypeOffice365, TypeOracle, TypeOracleServiceCloud, TypePaypal, TypePhoenix, TypePostgreSQL, TypePresto, TypeQuickBooks, TypeResponsys, TypeRestService, TypeSalesforce, TypeSalesforceMarketingCloud, TypeSalesforceServiceCloud, TypeSapBW, TypeSapCloudForCustomer, TypeSapEcc, TypeSapHana, TypeSapOpenHub, TypeSapTable, TypeServiceNow, TypeSftp, TypeShopify, TypeSpark, TypeSQLServer, TypeSquare, TypeSybase, TypeTeradata, TypeVertica, TypeWeb, TypeXero, TypeZoho}
+ return []TypeBasicLinkedService{TypeAmazonMWS, TypeAmazonRedshift, TypeAmazonS3, TypeAzureBatch, TypeAzureBlobFS, TypeAzureBlobStorage, TypeAzureDatabricks, TypeAzureDataExplorer, TypeAzureDataLakeAnalytics, TypeAzureDataLakeStore, TypeAzureFunction, TypeAzureKeyVault, TypeAzureMariaDB, TypeAzureML, TypeAzureMySQL, TypeAzurePostgreSQL, TypeAzureSearch, TypeAzureSQLDatabase, TypeAzureSQLDW, TypeAzureSQLMI, TypeAzureStorage, TypeAzureTableStorage, TypeCassandra, TypeCommonDataServiceForApps, TypeConcur, TypeCosmosDb, TypeCosmosDbMongoDbAPI, TypeCouchbase, TypeCustomDataSource, TypeDb2, TypeDrill, TypeDynamics, TypeDynamicsAX, TypeDynamicsCrm, TypeEloqua, TypeFileServer, TypeFtpServer, TypeGoogleAdWords, TypeGoogleBigQuery, TypeGreenplum, TypeHBase, TypeHdfs, TypeHDInsight, TypeHDInsightOnDemand, TypeHive, TypeHTTPServer, TypeHubspot, TypeImpala, TypeInformix, TypeJira, TypeLinkedService, TypeMagento, TypeMariaDB, TypeMarketo, TypeMicrosoftAccess, TypeMongoDb, TypeMongoDbV2, TypeMySQL, TypeNetezza, TypeOData, TypeOdbc, TypeOffice365, TypeOracle, TypeOracleServiceCloud, TypePaypal, TypePhoenix, TypePostgreSQL, TypePresto, TypeQuickBooks, TypeResponsys, TypeRestService, TypeSalesforce, TypeSalesforceMarketingCloud, TypeSalesforceServiceCloud, TypeSapBW, TypeSapCloudForCustomer, TypeSapEcc, TypeSapHana, TypeSapOpenHub, TypeSapTable, TypeServiceNow, TypeSftp, TypeShopify, TypeSpark, TypeSQLServer, TypeSquare, TypeSybase, TypeTeradata, TypeVertica, TypeWeb, TypeXero, TypeZoho}
}
// TypeBasicSsisObjectMetadata enumerates the values for type basic ssis object metadata.
@@ -2113,6 +2200,27 @@ func PossibleTypeBasicSsisObjectMetadataValues() []TypeBasicSsisObjectMetadata {
return []TypeBasicSsisObjectMetadata{TypeEnvironment, TypeFolder, TypePackage, TypeProject, TypeSsisObjectMetadata}
}
+// TypeBasicStoreWriteSettings enumerates the values for type basic store write settings.
+type TypeBasicStoreWriteSettings string
+
+const (
+ // TypeAzureBlobFSWriteSettings ...
+ TypeAzureBlobFSWriteSettings TypeBasicStoreWriteSettings = "AzureBlobFSWriteSettings"
+ // TypeAzureBlobStorageWriteSettings ...
+ TypeAzureBlobStorageWriteSettings TypeBasicStoreWriteSettings = "AzureBlobStorageWriteSettings"
+ // TypeAzureDataLakeStoreWriteSettings ...
+ TypeAzureDataLakeStoreWriteSettings TypeBasicStoreWriteSettings = "AzureDataLakeStoreWriteSettings"
+ // TypeFileServerWriteSettings ...
+ TypeFileServerWriteSettings TypeBasicStoreWriteSettings = "FileServerWriteSettings"
+ // TypeStoreWriteSettings ...
+ TypeStoreWriteSettings TypeBasicStoreWriteSettings = "StoreWriteSettings"
+)
+
+// PossibleTypeBasicStoreWriteSettingsValues returns an array of possible values for the TypeBasicStoreWriteSettings const type.
+func PossibleTypeBasicStoreWriteSettingsValues() []TypeBasicStoreWriteSettings {
+ return []TypeBasicStoreWriteSettings{TypeAzureBlobFSWriteSettings, TypeAzureBlobStorageWriteSettings, TypeAzureDataLakeStoreWriteSettings, TypeFileServerWriteSettings, TypeStoreWriteSettings}
+}
+
// TypeBasicTrigger enumerates the values for type basic trigger.
type TypeBasicTrigger string
@@ -3091,7 +3199,7 @@ type AmazonMWSLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -3243,6 +3351,11 @@ func (amls AmazonMWSLinkedService) AsMarketoLinkedService() (*MarketoLinkedServi
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AmazonMWSLinkedService.
+func (amls AmazonMWSLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AmazonMWSLinkedService.
func (amls AmazonMWSLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -3811,7 +3924,7 @@ type AmazonMWSObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -3947,6 +4060,11 @@ func (amod AmazonMWSObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDatas
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AmazonMWSObjectDataset.
+func (amod AmazonMWSObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AmazonMWSObjectDataset.
func (amod AmazonMWSObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -4072,6 +4190,16 @@ func (amod AmazonMWSObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapC
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AmazonMWSObjectDataset.
+func (amod AmazonMWSObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AmazonMWSObjectDataset.
+func (amod AmazonMWSObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AmazonMWSObjectDataset.
func (amod AmazonMWSObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -4112,6 +4240,16 @@ func (amod AmazonMWSObjectDataset) AsRelationalTableDataset() (*RelationalTableD
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AmazonMWSObjectDataset.
+func (amod AmazonMWSObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AmazonMWSObjectDataset.
+func (amod AmazonMWSObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AmazonMWSObjectDataset.
func (amod AmazonMWSObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -4227,6 +4365,11 @@ func (amod AmazonMWSObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AmazonMWSObjectDataset.
+func (amod AmazonMWSObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AmazonMWSObjectDataset.
func (amod AmazonMWSObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -4237,6 +4380,11 @@ func (amod AmazonMWSObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AmazonMWSObjectDataset.
+func (amod AmazonMWSObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AmazonMWSObjectDataset.
func (amod AmazonMWSObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -4372,7 +4520,7 @@ type AmazonMWSSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -4496,6 +4644,11 @@ func (ams AmazonMWSSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for AmazonMWSSource.
+func (ams AmazonMWSSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for AmazonMWSSource.
func (ams AmazonMWSSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -4716,6 +4869,11 @@ func (ams AmazonMWSSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for AmazonMWSSource.
+func (ams AmazonMWSSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for AmazonMWSSource.
func (ams AmazonMWSSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -4791,6 +4949,11 @@ func (ams AmazonMWSSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for AmazonMWSSource.
+func (ams AmazonMWSSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for AmazonMWSSource.
func (ams AmazonMWSSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -4801,6 +4964,11 @@ func (ams AmazonMWSSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for AmazonMWSSource.
+func (ams AmazonMWSSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for AmazonMWSSource.
func (ams AmazonMWSSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -4897,7 +5065,7 @@ type AmazonRedshiftLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -5049,6 +5217,11 @@ func (arls AmazonRedshiftLinkedService) AsMarketoLinkedService() (*MarketoLinked
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AmazonRedshiftLinkedService.
+func (arls AmazonRedshiftLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AmazonRedshiftLinkedService.
func (arls AmazonRedshiftLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -5568,7 +5741,7 @@ type AmazonRedshiftSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -5695,6 +5868,11 @@ func (ars AmazonRedshiftSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for AmazonRedshiftSource.
+func (ars AmazonRedshiftSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for AmazonRedshiftSource.
func (ars AmazonRedshiftSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -5915,6 +6093,11 @@ func (ars AmazonRedshiftSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for AmazonRedshiftSource.
+func (ars AmazonRedshiftSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for AmazonRedshiftSource.
func (ars AmazonRedshiftSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -5990,6 +6173,11 @@ func (ars AmazonRedshiftSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for AmazonRedshiftSource.
+func (ars AmazonRedshiftSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for AmazonRedshiftSource.
func (ars AmazonRedshiftSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -6000,6 +6188,11 @@ func (ars AmazonRedshiftSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for AmazonRedshiftSource.
+func (ars AmazonRedshiftSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for AmazonRedshiftSource.
func (ars AmazonRedshiftSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -6091,6 +6284,620 @@ func (ars *AmazonRedshiftSource) UnmarshalJSON(body []byte) error {
return nil
}
+// AmazonRedshiftTableDataset the Amazon Redshift table dataset.
+type AmazonRedshiftTableDataset struct {
+ // AmazonRedshiftTableDatasetTypeProperties - Amazon Redshift table dataset properties.
+ *AmazonRedshiftTableDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Description - Dataset description.
+ Description *string `json:"description,omitempty"`
+ // Structure - Columns that define the structure of the dataset. Type: array (or Expression with resultType array), itemType: DatasetDataElement.
+ Structure interface{} `json:"structure,omitempty"`
+ // Schema - Columns that define the physical type schema of the dataset. Type: array (or Expression with resultType array), itemType: DatasetSchemaDataElement.
+ Schema interface{} `json:"schema,omitempty"`
+ // LinkedServiceName - Linked service reference.
+ LinkedServiceName *LinkedServiceReference `json:"linkedServiceName,omitempty"`
+ // Parameters - Parameters for dataset.
+ Parameters map[string]*ParameterSpecification `json:"parameters"`
+ // Annotations - List of tags that can be used for describing the Dataset.
+ Annotations *[]interface{} `json:"annotations,omitempty"`
+ // Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
+ Folder *DatasetFolder `json:"folder,omitempty"`
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
+ Type TypeBasicDataset `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) MarshalJSON() ([]byte, error) {
+ artd.Type = TypeAmazonRedshiftTable
+ objectMap := make(map[string]interface{})
+ if artd.AmazonRedshiftTableDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = artd.AmazonRedshiftTableDatasetTypeProperties
+ }
+ if artd.Description != nil {
+ objectMap["description"] = artd.Description
+ }
+ if artd.Structure != nil {
+ objectMap["structure"] = artd.Structure
+ }
+ if artd.Schema != nil {
+ objectMap["schema"] = artd.Schema
+ }
+ if artd.LinkedServiceName != nil {
+ objectMap["linkedServiceName"] = artd.LinkedServiceName
+ }
+ if artd.Parameters != nil {
+ objectMap["parameters"] = artd.Parameters
+ }
+ if artd.Annotations != nil {
+ objectMap["annotations"] = artd.Annotations
+ }
+ if artd.Folder != nil {
+ objectMap["folder"] = artd.Folder
+ }
+ if artd.Type != "" {
+ objectMap["type"] = artd.Type
+ }
+ for k, v := range artd.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsGoogleAdWordsObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsGoogleAdWordsObjectDataset() (*GoogleAdWordsObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzureDataExplorerTableDataset() (*AzureDataExplorerTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsOracleServiceCloudObjectDataset() (*OracleServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXResourceDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsDynamicsAXResourceDataset() (*DynamicsAXResourceDataset, bool) {
+ return nil, false
+}
+
+// AsResponsysObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsResponsysObjectDataset() (*ResponsysObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSalesforceMarketingCloudObjectDataset() (*SalesforceMarketingCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsVerticaTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsVerticaTableDataset() (*VerticaTableDataset, bool) {
+ return nil, false
+}
+
+// AsNetezzaTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsNetezzaTableDataset() (*NetezzaTableDataset, bool) {
+ return nil, false
+}
+
+// AsZohoObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsZohoObjectDataset() (*ZohoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsXeroObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsXeroObjectDataset() (*XeroObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSquareObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSquareObjectDataset() (*SquareObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSparkObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSparkObjectDataset() (*SparkObjectDataset, bool) {
+ return nil, false
+}
+
+// AsShopifyObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsShopifyObjectDataset() (*ShopifyObjectDataset, bool) {
+ return nil, false
+}
+
+// AsServiceNowObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsServiceNowObjectDataset() (*ServiceNowObjectDataset, bool) {
+ return nil, false
+}
+
+// AsQuickBooksObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsQuickBooksObjectDataset() (*QuickBooksObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPrestoObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsPrestoObjectDataset() (*PrestoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPhoenixObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsPhoenixObjectDataset() (*PhoenixObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPaypalObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsPaypalObjectDataset() (*PaypalObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMarketoObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMariaDBTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMagentoObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsMagentoObjectDataset() (*MagentoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsJiraObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsJiraObjectDataset() (*JiraObjectDataset, bool) {
+ return nil, false
+}
+
+// AsImpalaObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsImpalaObjectDataset() (*ImpalaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHubspotObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsHubspotObjectDataset() (*HubspotObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHiveObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsHiveObjectDataset() (*HiveObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHBaseObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsHBaseObjectDataset() (*HBaseObjectDataset, bool) {
+ return nil, false
+}
+
+// AsGreenplumTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsGreenplumTableDataset() (*GreenplumTableDataset, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQueryObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsGoogleBigQueryObjectDataset() (*GoogleBigQueryObjectDataset, bool) {
+ return nil, false
+}
+
+// AsEloquaObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsEloquaObjectDataset() (*EloquaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDrillTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsDrillTableDataset() (*DrillTableDataset, bool) {
+ return nil, false
+}
+
+// AsCouchbaseTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsCouchbaseTableDataset() (*CouchbaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsConcurObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsConcurObjectDataset() (*ConcurObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzurePostgreSQLTableDataset() (*AzurePostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAmazonMWSObjectDataset() (*AmazonMWSObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHTTPDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsHTTPDataset() (*HTTPDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSearchIndexDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzureSearchIndexDataset() (*AzureSearchIndexDataset, bool) {
+ return nil, false
+}
+
+// AsWebTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsWebTableDataset() (*WebTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapTableResourceDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSapTableResourceDataset() (*SapTableResourceDataset, bool) {
+ return nil, false
+}
+
+// AsRestResourceDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsRestResourceDataset() (*RestResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSQLServerTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSQLServerTableDataset() (*SQLServerTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSapOpenHubTableDataset() (*SapOpenHubTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapHanaTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSapHanaTableDataset() (*SapHanaTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapEccResourceDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSapEccResourceDataset() (*SapEccResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerResourceDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCustomerResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapBwCubeDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceObjectDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsSalesforceObjectDataset() (*SalesforceObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsMicrosoftAccessTableDataset() (*MicrosoftAccessTableDataset, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsPostgreSQLTableDataset() (*PostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsMySQLTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsMySQLTableDataset() (*MySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsOdbcTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsOdbcTableDataset() (*OdbcTableDataset, bool) {
+ return nil, false
+}
+
+// AsInformixTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsInformixTableDataset() (*InformixTableDataset, bool) {
+ return nil, false
+}
+
+// AsRelationalTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsRelationalTableDataset() (*RelationalTableDataset, bool) {
+ return nil, false
+}
+
+// AsDb2TableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return &artd, true
+}
+
+// AsAzureMySQLTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsTeradataTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsTeradataTableDataset() (*TeradataTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsOracleTableDataset() (*OracleTableDataset, bool) {
+ return nil, false
+}
+
+// AsODataResourceDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsODataResourceDataset() (*ODataResourceDataset, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPICollectionDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsCosmosDbMongoDbAPICollectionDataset() (*CosmosDbMongoDbAPICollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2CollectionDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsMongoDbV2CollectionDataset() (*MongoDbV2CollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbCollectionDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsMongoDbCollectionDataset() (*MongoDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsFileShareDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsFileShareDataset() (*FileShareDataset, bool) {
+ return nil, false
+}
+
+// AsOffice365Dataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsOffice365Dataset() (*Office365Dataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzureBlobFSDataset() (*AzureBlobFSDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzureDataLakeStoreDataset() (*AzureDataLakeStoreDataset, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsEntityDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsCommonDataServiceForAppsEntityDataset() (*CommonDataServiceForAppsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmEntityDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsDynamicsCrmEntityDataset() (*DynamicsCrmEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsEntityDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsDynamicsEntityDataset() (*DynamicsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsDocumentDbCollectionDataset() (*DocumentDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsCustomDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsCustomDataset() (*CustomDataset, bool) {
+ return nil, false
+}
+
+// AsCassandraTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsCassandraTableDataset() (*CassandraTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLDWTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzureSQLDWTableDataset() (*AzureSQLDWTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLMITableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzureSQLMITableDataset() (*AzureSQLMITableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzureSQLTableDataset() (*AzureSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureTableDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzureTableDataset() (*AzureTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAzureBlobDataset() (*AzureBlobDataset, bool) {
+ return nil, false
+}
+
+// AsBinaryDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
+ return nil, false
+}
+
+// AsJSONDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
+// AsDelimitedTextDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
+ return nil, false
+}
+
+// AsParquetDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
+ return nil, false
+}
+
+// AsAvroDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonS3Dataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
+ return nil, false
+}
+
+// AsDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsDataset() (*Dataset, bool) {
+ return nil, false
+}
+
+// AsBasicDataset is the BasicDataset implementation for AmazonRedshiftTableDataset.
+func (artd AmazonRedshiftTableDataset) AsBasicDataset() (BasicDataset, bool) {
+ return &artd, true
+}
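The As* helpers above are the generated discriminator pattern: every concrete dataset implements the BasicDataset interface, and each helper reports whether the value really is that concrete type. A minimal sketch of how calling code might use the new AmazonRedshiftTableDataset helpers; the import path and literal values are assumptions for illustration and are not part of this diff:

```go
package main

import (
	"fmt"

	// Assumed import path for the generated datafactory models shown in this diff.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// A concrete dataset value satisfies the BasicDataset interface.
	var ds datafactory.BasicDataset = datafactory.AmazonRedshiftTableDataset{
		AmazonRedshiftTableDatasetTypeProperties: &datafactory.AmazonRedshiftTableDatasetTypeProperties{
			Schema: "public",
			Table:  "sales",
		},
	}

	// The As* helpers recover the concrete type without a manual type switch.
	if artd, ok := ds.AsAmazonRedshiftTableDataset(); ok {
		fmt.Println(artd.Table) // sales
	}
	if _, ok := ds.AsAvroDataset(); ok {
		fmt.Println("never reached: the value is not an AvroDataset")
	}
}
```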
+
+// UnmarshalJSON is the custom unmarshaler for AmazonRedshiftTableDataset struct.
+func (artd *AmazonRedshiftTableDataset) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "typeProperties":
+ if v != nil {
+ var amazonRedshiftTableDatasetTypeProperties AmazonRedshiftTableDatasetTypeProperties
+ err = json.Unmarshal(*v, &amazonRedshiftTableDatasetTypeProperties)
+ if err != nil {
+ return err
+ }
+ artd.AmazonRedshiftTableDatasetTypeProperties = &amazonRedshiftTableDatasetTypeProperties
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if artd.AdditionalProperties == nil {
+ artd.AdditionalProperties = make(map[string]interface{})
+ }
+ artd.AdditionalProperties[k] = additionalProperties
+ }
+ case "description":
+ if v != nil {
+ var description string
+ err = json.Unmarshal(*v, &description)
+ if err != nil {
+ return err
+ }
+ artd.Description = &description
+ }
+ case "structure":
+ if v != nil {
+ var structure interface{}
+ err = json.Unmarshal(*v, &structure)
+ if err != nil {
+ return err
+ }
+ artd.Structure = structure
+ }
+ case "schema":
+ if v != nil {
+ var schema interface{}
+ err = json.Unmarshal(*v, &schema)
+ if err != nil {
+ return err
+ }
+ artd.Schema = schema
+ }
+ case "linkedServiceName":
+ if v != nil {
+ var linkedServiceName LinkedServiceReference
+ err = json.Unmarshal(*v, &linkedServiceName)
+ if err != nil {
+ return err
+ }
+ artd.LinkedServiceName = &linkedServiceName
+ }
+ case "parameters":
+ if v != nil {
+ var parameters map[string]*ParameterSpecification
+ err = json.Unmarshal(*v, &parameters)
+ if err != nil {
+ return err
+ }
+ artd.Parameters = parameters
+ }
+ case "annotations":
+ if v != nil {
+ var annotations []interface{}
+ err = json.Unmarshal(*v, &annotations)
+ if err != nil {
+ return err
+ }
+ artd.Annotations = &annotations
+ }
+ case "folder":
+ if v != nil {
+ var folder DatasetFolder
+ err = json.Unmarshal(*v, &folder)
+ if err != nil {
+ return err
+ }
+ artd.Folder = &folder
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicDataset
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ artd.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// AmazonRedshiftTableDatasetTypeProperties Amazon Redshift table dataset properties.
+type AmazonRedshiftTableDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Table - The Amazon Redshift table name. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Schema - The Amazon Redshift schema name. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+}
+
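Because TableName is being retired in favor of the separate Table and Schema type properties, here is a minimal sketch of building the new dataset and round-tripping it through the custom marshalers above; the import path and the literal values are illustrative assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path for the generated datafactory models shown in this diff.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// Prefer the new Table and Schema type properties over the retired TableName.
	src := datafactory.AmazonRedshiftTableDataset{
		AmazonRedshiftTableDatasetTypeProperties: &datafactory.AmazonRedshiftTableDatasetTypeProperties{
			Schema: "public",
			Table:  "sales",
		},
	}

	// The custom MarshalJSON above stamps the AmazonRedshiftTable discriminator
	// into "type" alongside "typeProperties".
	payload, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))

	// The custom UnmarshalJSON rebuilds the typed properties and keeps any
	// unknown keys in AdditionalProperties.
	var dst datafactory.AmazonRedshiftTableDataset
	if err := json.Unmarshal(payload, &dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.Table, dst.AmazonRedshiftTableDatasetTypeProperties.Schema) // sales public
}
```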
// AmazonS3Dataset a single Amazon Simple Storage Service (S3) object or a set of S3 objects.
type AmazonS3Dataset struct {
// AmazonS3DatasetTypeProperties - Amazon S3 dataset properties.
@@ -6111,7 +6918,7 @@ type AmazonS3Dataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -6247,6 +7054,11 @@ func (asd AmazonS3Dataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AmazonS3Dataset.
+func (asd AmazonS3Dataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AmazonS3Dataset.
func (asd AmazonS3Dataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -6372,6 +7184,16 @@ func (asd AmazonS3Dataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForC
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AmazonS3Dataset.
+func (asd AmazonS3Dataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AmazonS3Dataset.
+func (asd AmazonS3Dataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AmazonS3Dataset.
func (asd AmazonS3Dataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -6412,6 +7234,16 @@ func (asd AmazonS3Dataset) AsRelationalTableDataset() (*RelationalTableDataset,
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AmazonS3Dataset.
+func (asd AmazonS3Dataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AmazonS3Dataset.
+func (asd AmazonS3Dataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AmazonS3Dataset.
func (asd AmazonS3Dataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -6527,6 +7359,11 @@ func (asd AmazonS3Dataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AmazonS3Dataset.
+func (asd AmazonS3Dataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AmazonS3Dataset.
func (asd AmazonS3Dataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -6537,6 +7374,11 @@ func (asd AmazonS3Dataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AmazonS3Dataset.
+func (asd AmazonS3Dataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AmazonS3Dataset.
func (asd AmazonS3Dataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return &asd, true
@@ -6779,7 +7621,7 @@ type AmazonS3LinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -6931,6 +7773,11 @@ func (asls AmazonS3LinkedService) AsMarketoLinkedService() (*MarketoLinkedServic
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AmazonS3LinkedService.
+func (asls AmazonS3LinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AmazonS3LinkedService.
func (asls AmazonS3LinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -8014,6 +8861,619 @@ type AppendVariableActivityTypeProperties struct {
Value interface{} `json:"value,omitempty"`
}
+// AvroDataset Avro dataset.
+type AvroDataset struct {
+ // AvroDatasetTypeProperties - Avro dataset properties.
+ *AvroDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Description - Dataset description.
+ Description *string `json:"description,omitempty"`
+ // Structure - Columns that define the structure of the dataset. Type: array (or Expression with resultType array), itemType: DatasetDataElement.
+ Structure interface{} `json:"structure,omitempty"`
+ // Schema - Columns that define the physical type schema of the dataset. Type: array (or Expression with resultType array), itemType: DatasetSchemaDataElement.
+ Schema interface{} `json:"schema,omitempty"`
+ // LinkedServiceName - Linked service reference.
+ LinkedServiceName *LinkedServiceReference `json:"linkedServiceName,omitempty"`
+ // Parameters - Parameters for dataset.
+ Parameters map[string]*ParameterSpecification `json:"parameters"`
+ // Annotations - List of tags that can be used for describing the Dataset.
+ Annotations *[]interface{} `json:"annotations,omitempty"`
+ // Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
+ Folder *DatasetFolder `json:"folder,omitempty"`
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
+ Type TypeBasicDataset `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AvroDataset.
+func (ad AvroDataset) MarshalJSON() ([]byte, error) {
+ ad.Type = TypeAvro
+ objectMap := make(map[string]interface{})
+ if ad.AvroDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = ad.AvroDatasetTypeProperties
+ }
+ if ad.Description != nil {
+ objectMap["description"] = ad.Description
+ }
+ if ad.Structure != nil {
+ objectMap["structure"] = ad.Structure
+ }
+ if ad.Schema != nil {
+ objectMap["schema"] = ad.Schema
+ }
+ if ad.LinkedServiceName != nil {
+ objectMap["linkedServiceName"] = ad.LinkedServiceName
+ }
+ if ad.Parameters != nil {
+ objectMap["parameters"] = ad.Parameters
+ }
+ if ad.Annotations != nil {
+ objectMap["annotations"] = ad.Annotations
+ }
+ if ad.Folder != nil {
+ objectMap["folder"] = ad.Folder
+ }
+ if ad.Type != "" {
+ objectMap["type"] = ad.Type
+ }
+ for k, v := range ad.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsGoogleAdWordsObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsGoogleAdWordsObjectDataset() (*GoogleAdWordsObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzureDataExplorerTableDataset() (*AzureDataExplorerTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsOracleServiceCloudObjectDataset() (*OracleServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXResourceDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsDynamicsAXResourceDataset() (*DynamicsAXResourceDataset, bool) {
+ return nil, false
+}
+
+// AsResponsysObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsResponsysObjectDataset() (*ResponsysObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSalesforceMarketingCloudObjectDataset() (*SalesforceMarketingCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsVerticaTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsVerticaTableDataset() (*VerticaTableDataset, bool) {
+ return nil, false
+}
+
+// AsNetezzaTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsNetezzaTableDataset() (*NetezzaTableDataset, bool) {
+ return nil, false
+}
+
+// AsZohoObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsZohoObjectDataset() (*ZohoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsXeroObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsXeroObjectDataset() (*XeroObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSquareObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSquareObjectDataset() (*SquareObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSparkObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSparkObjectDataset() (*SparkObjectDataset, bool) {
+ return nil, false
+}
+
+// AsShopifyObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsShopifyObjectDataset() (*ShopifyObjectDataset, bool) {
+ return nil, false
+}
+
+// AsServiceNowObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsServiceNowObjectDataset() (*ServiceNowObjectDataset, bool) {
+ return nil, false
+}
+
+// AsQuickBooksObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsQuickBooksObjectDataset() (*QuickBooksObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPrestoObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsPrestoObjectDataset() (*PrestoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPhoenixObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsPhoenixObjectDataset() (*PhoenixObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPaypalObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsPaypalObjectDataset() (*PaypalObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMarketoObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMariaDBTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMagentoObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsMagentoObjectDataset() (*MagentoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsJiraObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsJiraObjectDataset() (*JiraObjectDataset, bool) {
+ return nil, false
+}
+
+// AsImpalaObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsImpalaObjectDataset() (*ImpalaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHubspotObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsHubspotObjectDataset() (*HubspotObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHiveObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsHiveObjectDataset() (*HiveObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHBaseObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsHBaseObjectDataset() (*HBaseObjectDataset, bool) {
+ return nil, false
+}
+
+// AsGreenplumTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsGreenplumTableDataset() (*GreenplumTableDataset, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQueryObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsGoogleBigQueryObjectDataset() (*GoogleBigQueryObjectDataset, bool) {
+ return nil, false
+}
+
+// AsEloquaObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsEloquaObjectDataset() (*EloquaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDrillTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsDrillTableDataset() (*DrillTableDataset, bool) {
+ return nil, false
+}
+
+// AsCouchbaseTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsCouchbaseTableDataset() (*CouchbaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsConcurObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsConcurObjectDataset() (*ConcurObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzurePostgreSQLTableDataset() (*AzurePostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAmazonMWSObjectDataset() (*AmazonMWSObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHTTPDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsHTTPDataset() (*HTTPDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSearchIndexDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzureSearchIndexDataset() (*AzureSearchIndexDataset, bool) {
+ return nil, false
+}
+
+// AsWebTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsWebTableDataset() (*WebTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapTableResourceDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSapTableResourceDataset() (*SapTableResourceDataset, bool) {
+ return nil, false
+}
+
+// AsRestResourceDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsRestResourceDataset() (*RestResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSQLServerTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSQLServerTableDataset() (*SQLServerTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSapOpenHubTableDataset() (*SapOpenHubTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapHanaTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSapHanaTableDataset() (*SapHanaTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapEccResourceDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSapEccResourceDataset() (*SapEccResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerResourceDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCustomerResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapBwCubeDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceObjectDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsSalesforceObjectDataset() (*SalesforceObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsMicrosoftAccessTableDataset() (*MicrosoftAccessTableDataset, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsPostgreSQLTableDataset() (*PostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsMySQLTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsMySQLTableDataset() (*MySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsOdbcTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsOdbcTableDataset() (*OdbcTableDataset, bool) {
+ return nil, false
+}
+
+// AsInformixTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsInformixTableDataset() (*InformixTableDataset, bool) {
+ return nil, false
+}
+
+// AsRelationalTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsRelationalTableDataset() (*RelationalTableDataset, bool) {
+ return nil, false
+}
+
+// AsDb2TableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsTeradataTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsTeradataTableDataset() (*TeradataTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsOracleTableDataset() (*OracleTableDataset, bool) {
+ return nil, false
+}
+
+// AsODataResourceDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsODataResourceDataset() (*ODataResourceDataset, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPICollectionDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsCosmosDbMongoDbAPICollectionDataset() (*CosmosDbMongoDbAPICollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2CollectionDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsMongoDbV2CollectionDataset() (*MongoDbV2CollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbCollectionDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsMongoDbCollectionDataset() (*MongoDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsFileShareDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsFileShareDataset() (*FileShareDataset, bool) {
+ return nil, false
+}
+
+// AsOffice365Dataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsOffice365Dataset() (*Office365Dataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzureBlobFSDataset() (*AzureBlobFSDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzureDataLakeStoreDataset() (*AzureDataLakeStoreDataset, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsEntityDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsCommonDataServiceForAppsEntityDataset() (*CommonDataServiceForAppsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmEntityDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsDynamicsCrmEntityDataset() (*DynamicsCrmEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsEntityDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsDynamicsEntityDataset() (*DynamicsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsDocumentDbCollectionDataset() (*DocumentDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsCustomDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsCustomDataset() (*CustomDataset, bool) {
+ return nil, false
+}
+
+// AsCassandraTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsCassandraTableDataset() (*CassandraTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLDWTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzureSQLDWTableDataset() (*AzureSQLDWTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLMITableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzureSQLMITableDataset() (*AzureSQLMITableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzureSQLTableDataset() (*AzureSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureTableDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzureTableDataset() (*AzureTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAzureBlobDataset() (*AzureBlobDataset, bool) {
+ return nil, false
+}
+
+// AsBinaryDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsBinaryDataset() (*BinaryDataset, bool) {
+ return nil, false
+}
+
+// AsJSONDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
+// AsDelimitedTextDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
+ return nil, false
+}
+
+// AsParquetDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsParquetDataset() (*ParquetDataset, bool) {
+ return nil, false
+}
+
+// AsAvroDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return &ad, true
+}
+
+// AsAmazonS3Dataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
+ return nil, false
+}
+
+// AsDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsDataset() (*Dataset, bool) {
+ return nil, false
+}
+
+// AsBasicDataset is the BasicDataset implementation for AvroDataset.
+func (ad AvroDataset) AsBasicDataset() (BasicDataset, bool) {
+ return &ad, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for AvroDataset struct.
+func (ad *AvroDataset) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "typeProperties":
+ if v != nil {
+ var avroDatasetTypeProperties AvroDatasetTypeProperties
+ err = json.Unmarshal(*v, &avroDatasetTypeProperties)
+ if err != nil {
+ return err
+ }
+ ad.AvroDatasetTypeProperties = &avroDatasetTypeProperties
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if ad.AdditionalProperties == nil {
+ ad.AdditionalProperties = make(map[string]interface{})
+ }
+ ad.AdditionalProperties[k] = additionalProperties
+ }
+ case "description":
+ if v != nil {
+ var description string
+ err = json.Unmarshal(*v, &description)
+ if err != nil {
+ return err
+ }
+ ad.Description = &description
+ }
+ case "structure":
+ if v != nil {
+ var structure interface{}
+ err = json.Unmarshal(*v, &structure)
+ if err != nil {
+ return err
+ }
+ ad.Structure = structure
+ }
+ case "schema":
+ if v != nil {
+ var schema interface{}
+ err = json.Unmarshal(*v, &schema)
+ if err != nil {
+ return err
+ }
+ ad.Schema = schema
+ }
+ case "linkedServiceName":
+ if v != nil {
+ var linkedServiceName LinkedServiceReference
+ err = json.Unmarshal(*v, &linkedServiceName)
+ if err != nil {
+ return err
+ }
+ ad.LinkedServiceName = &linkedServiceName
+ }
+ case "parameters":
+ if v != nil {
+ var parameters map[string]*ParameterSpecification
+ err = json.Unmarshal(*v, &parameters)
+ if err != nil {
+ return err
+ }
+ ad.Parameters = parameters
+ }
+ case "annotations":
+ if v != nil {
+ var annotations []interface{}
+ err = json.Unmarshal(*v, &annotations)
+ if err != nil {
+ return err
+ }
+ ad.Annotations = &annotations
+ }
+ case "folder":
+ if v != nil {
+ var folder DatasetFolder
+ err = json.Unmarshal(*v, &folder)
+ if err != nil {
+ return err
+ }
+ ad.Folder = &folder
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicDataset
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ad.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// AvroDatasetTypeProperties Avro dataset properties.
+type AvroDatasetTypeProperties struct {
+ // Location - The location of the Avro storage.
+ Location *DatasetLocation `json:"location,omitempty"`
+ // AvroCompressionCodec - Possible values include: 'None', 'Deflate', 'Snappy', 'Xz', 'Bzip2'
+ AvroCompressionCodec AvroCompressionCodec `json:"avroCompressionCodec,omitempty"`
+ AvroCompressionLevel *int32 `json:"avroCompressionLevel,omitempty"`
+}
+
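
For orientation only (this is not part of the generated diff): a minimal sketch of how the `AvroDataset` custom unmarshaler and the `As<X>Dataset` helpers above behave. The import path is assumed to be the usual track-1 location for this package, and `myCustomTag` is a made-up property used purely to show where unmatched keys end up.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// A raw dataset payload; "myCustomTag" is a hypothetical key that the generated
	// unmarshaler routes into AdditionalProperties via its default case.
	raw := []byte(`{"type":"Avro","description":"sample avro dataset","myCustomTag":42}`)

	var ad datafactory.AvroDataset
	if err := json.Unmarshal(raw, &ad); err != nil {
		panic(err)
	}

	fmt.Println(*ad.Description)                        // "sample avro dataset"
	fmt.Println(ad.AdditionalProperties["myCustomTag"]) // 42 (decoded as float64)

	// Each As<X>Dataset helper returns (nil, false) except the receiver's own type.
	if avro, ok := ad.AsAvroDataset(); ok {
		fmt.Println("avro dataset:", *avro.Description)
	}
	if _, ok := ad.AsParquetDataset(); !ok {
		fmt.Println("not a parquet dataset")
	}
}
```
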
// AvroFormat the data stored in Avro format.
type AvroFormat struct {
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
@@ -8134,6 +9594,955 @@ func (af *AvroFormat) UnmarshalJSON(body []byte) error {
return nil
}
+// AvroSink a copy activity Avro sink.
+type AvroSink struct {
+ // StoreSettings - Avro store settings.
+ StoreSettings BasicStoreWriteSettings `json:"storeSettings,omitempty"`
+ // FormatSettings - Avro format settings.
+ FormatSettings *AvroWriteSettings `json:"formatSettings,omitempty"`
+ // AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // WriteBatchSize - Write batch size. Type: integer (or Expression with resultType integer), minimum: 0.
+ WriteBatchSize interface{} `json:"writeBatchSize,omitempty"`
+ // WriteBatchTimeout - Write batch timeout. Type: string (or Expression with resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ WriteBatchTimeout interface{} `json:"writeBatchTimeout,omitempty"`
+ // SinkRetryCount - Sink retry count. Type: integer (or Expression with resultType integer).
+ SinkRetryCount interface{} `json:"sinkRetryCount,omitempty"`
+ // SinkRetryWait - Sink retry wait. Type: string (or Expression with resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
+ // MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
+ MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
+ Type TypeBasicCopySink `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AvroSink.
+func (as AvroSink) MarshalJSON() ([]byte, error) {
+ as.Type = TypeAvroSink
+ objectMap := make(map[string]interface{})
+ objectMap["storeSettings"] = as.StoreSettings
+ if as.FormatSettings != nil {
+ objectMap["formatSettings"] = as.FormatSettings
+ }
+ if as.WriteBatchSize != nil {
+ objectMap["writeBatchSize"] = as.WriteBatchSize
+ }
+ if as.WriteBatchTimeout != nil {
+ objectMap["writeBatchTimeout"] = as.WriteBatchTimeout
+ }
+ if as.SinkRetryCount != nil {
+ objectMap["sinkRetryCount"] = as.SinkRetryCount
+ }
+ if as.SinkRetryWait != nil {
+ objectMap["sinkRetryWait"] = as.SinkRetryWait
+ }
+ if as.MaxConcurrentConnections != nil {
+ objectMap["maxConcurrentConnections"] = as.MaxConcurrentConnections
+ }
+ if as.Type != "" {
+ objectMap["type"] = as.Type
+ }
+ for k, v := range as.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsCosmosDbMongoDbAPISink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsCosmosDbMongoDbAPISink() (*CosmosDbMongoDbAPISink, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsSalesforceServiceCloudSink() (*SalesforceServiceCloudSink, bool) {
+ return nil, false
+}
+
+// AsSalesforceSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsSalesforceSink() (*SalesforceSink, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsAzureDataExplorerSink() (*AzureDataExplorerSink, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsCommonDataServiceForAppsSink() (*CommonDataServiceForAppsSink, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsDynamicsCrmSink() (*DynamicsCrmSink, bool) {
+ return nil, false
+}
+
+// AsDynamicsSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsDynamicsSink() (*DynamicsSink, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsMicrosoftAccessSink() (*MicrosoftAccessSink, bool) {
+ return nil, false
+}
+
+// AsInformixSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsInformixSink() (*InformixSink, bool) {
+ return nil, false
+}
+
+// AsOdbcSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsOdbcSink() (*OdbcSink, bool) {
+ return nil, false
+}
+
+// AsAzureSearchIndexSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsAzureSearchIndexSink() (*AzureSearchIndexSink, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsAzureBlobFSSink() (*AzureBlobFSSink, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsAzureDataLakeStoreSink() (*AzureDataLakeStoreSink, bool) {
+ return nil, false
+}
+
+// AsOracleSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsOracleSink() (*OracleSink, bool) {
+ return nil, false
+}
+
+// AsSQLDWSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsSQLDWSink() (*SQLDWSink, bool) {
+ return nil, false
+}
+
+// AsSQLMISink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsSQLMISink() (*SQLMISink, bool) {
+ return nil, false
+}
+
+// AsAzureSQLSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsAzureSQLSink() (*AzureSQLSink, bool) {
+ return nil, false
+}
+
+// AsSQLServerSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsSQLServerSink() (*SQLServerSink, bool) {
+ return nil, false
+}
+
+// AsSQLSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsSQLSink() (*SQLSink, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsDocumentDbCollectionSink() (*DocumentDbCollectionSink, bool) {
+ return nil, false
+}
+
+// AsFileSystemSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsFileSystemSink() (*FileSystemSink, bool) {
+ return nil, false
+}
+
+// AsBlobSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsBlobSink() (*BlobSink, bool) {
+ return nil, false
+}
+
+// AsBinarySink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsBinarySink() (*BinarySink, bool) {
+ return nil, false
+}
+
+// AsParquetSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsParquetSink() (*ParquetSink, bool) {
+ return nil, false
+}
+
+// AsAvroSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsAvroSink() (*AvroSink, bool) {
+ return &as, true
+}
+
+// AsAzureTableSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsAzureTableSink() (*AzureTableSink, bool) {
+ return nil, false
+}
+
+// AsAzureQueueSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsAzureQueueSink() (*AzureQueueSink, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
+ return nil, false
+}
+
+// AsJSONSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
+// AsDelimitedTextSink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
+ return nil, false
+}
+
+// AsCopySink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsCopySink() (*CopySink, bool) {
+ return nil, false
+}
+
+// AsBasicCopySink is the BasicCopySink implementation for AvroSink.
+func (as AvroSink) AsBasicCopySink() (BasicCopySink, bool) {
+ return &as, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for AvroSink struct.
+func (as *AvroSink) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "storeSettings":
+ if v != nil {
+ storeSettings, err := unmarshalBasicStoreWriteSettings(*v)
+ if err != nil {
+ return err
+ }
+ as.StoreSettings = storeSettings
+ }
+ case "formatSettings":
+ if v != nil {
+ var formatSettings AvroWriteSettings
+ err = json.Unmarshal(*v, &formatSettings)
+ if err != nil {
+ return err
+ }
+ as.FormatSettings = &formatSettings
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if as.AdditionalProperties == nil {
+ as.AdditionalProperties = make(map[string]interface{})
+ }
+ as.AdditionalProperties[k] = additionalProperties
+ }
+ case "writeBatchSize":
+ if v != nil {
+ var writeBatchSize interface{}
+ err = json.Unmarshal(*v, &writeBatchSize)
+ if err != nil {
+ return err
+ }
+ as.WriteBatchSize = writeBatchSize
+ }
+ case "writeBatchTimeout":
+ if v != nil {
+ var writeBatchTimeout interface{}
+ err = json.Unmarshal(*v, &writeBatchTimeout)
+ if err != nil {
+ return err
+ }
+ as.WriteBatchTimeout = writeBatchTimeout
+ }
+ case "sinkRetryCount":
+ if v != nil {
+ var sinkRetryCount interface{}
+ err = json.Unmarshal(*v, &sinkRetryCount)
+ if err != nil {
+ return err
+ }
+ as.SinkRetryCount = sinkRetryCount
+ }
+ case "sinkRetryWait":
+ if v != nil {
+ var sinkRetryWait interface{}
+ err = json.Unmarshal(*v, &sinkRetryWait)
+ if err != nil {
+ return err
+ }
+ as.SinkRetryWait = sinkRetryWait
+ }
+ case "maxConcurrentConnections":
+ if v != nil {
+ var maxConcurrentConnections interface{}
+ err = json.Unmarshal(*v, &maxConcurrentConnections)
+ if err != nil {
+ return err
+ }
+ as.MaxConcurrentConnections = maxConcurrentConnections
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicCopySink
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ as.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
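
As a quick, non-normative sketch of how the `AvroSink` marshaler above is used: `MarshalJSON` stamps the `TypeAvroSink` discriminator itself and folds `AdditionalProperties` back into the top-level object, so callers never set `Type` by hand. The import paths and the `x-custom` key are assumptions for illustration; the `to.StringPtr` helper comes from go-autorest.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	sink := datafactory.AvroSink{
		FormatSettings: &datafactory.AvroWriteSettings{
			RecordName: to.StringPtr("root"),
		},
		// interface{} fields accept plain Go values or Expression-shaped objects.
		WriteBatchSize:       10000,
		AdditionalProperties: map[string]interface{}{"x-custom": true}, // hypothetical extra key
	}

	// MarshalJSON sets the Type discriminator to TypeAvroSink before emitting.
	b, err := json.Marshal(sink)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```
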
+// AvroSource a copy activity Avro source.
+type AvroSource struct {
+ // StoreSettings - Avro store settings.
+ StoreSettings *StoreReadSettings `json:"storeSettings,omitempty"`
+ // AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // SourceRetryCount - Source retry count. Type: integer (or Expression with resultType integer).
+ SourceRetryCount interface{} `json:"sourceRetryCount,omitempty"`
+ // SourceRetryWait - Source retry wait. Type: string (or Expression with resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
+ // MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
+ MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
+ Type TypeBasicCopySource `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AvroSource.
+func (as AvroSource) MarshalJSON() ([]byte, error) {
+ as.Type = TypeAvroSource
+ objectMap := make(map[string]interface{})
+ if as.StoreSettings != nil {
+ objectMap["storeSettings"] = as.StoreSettings
+ }
+ if as.SourceRetryCount != nil {
+ objectMap["sourceRetryCount"] = as.SourceRetryCount
+ }
+ if as.SourceRetryWait != nil {
+ objectMap["sourceRetryWait"] = as.SourceRetryWait
+ }
+ if as.MaxConcurrentConnections != nil {
+ objectMap["maxConcurrentConnections"] = as.MaxConcurrentConnections
+ }
+ if as.Type != "" {
+ objectMap["type"] = as.Type
+ }
+ for k, v := range as.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAmazonRedshiftSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsAmazonRedshiftSource() (*AmazonRedshiftSource, bool) {
+ return nil, false
+}
+
+// AsGoogleAdWordsSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsGoogleAdWordsSource() (*GoogleAdWordsSource, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsOracleServiceCloudSource() (*OracleServiceCloudSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsDynamicsAXSource() (*DynamicsAXSource, bool) {
+ return nil, false
+}
+
+// AsResponsysSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsResponsysSource() (*ResponsysSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSalesforceMarketingCloudSource() (*SalesforceMarketingCloudSource, bool) {
+ return nil, false
+}
+
+// AsVerticaSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsVerticaSource() (*VerticaSource, bool) {
+ return nil, false
+}
+
+// AsNetezzaSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsNetezzaSource() (*NetezzaSource, bool) {
+ return nil, false
+}
+
+// AsZohoSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsZohoSource() (*ZohoSource, bool) {
+ return nil, false
+}
+
+// AsXeroSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsXeroSource() (*XeroSource, bool) {
+ return nil, false
+}
+
+// AsSquareSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSquareSource() (*SquareSource, bool) {
+ return nil, false
+}
+
+// AsSparkSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSparkSource() (*SparkSource, bool) {
+ return nil, false
+}
+
+// AsShopifySource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsShopifySource() (*ShopifySource, bool) {
+ return nil, false
+}
+
+// AsServiceNowSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsServiceNowSource() (*ServiceNowSource, bool) {
+ return nil, false
+}
+
+// AsQuickBooksSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsQuickBooksSource() (*QuickBooksSource, bool) {
+ return nil, false
+}
+
+// AsPrestoSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsPrestoSource() (*PrestoSource, bool) {
+ return nil, false
+}
+
+// AsPhoenixSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsPhoenixSource() (*PhoenixSource, bool) {
+ return nil, false
+}
+
+// AsPaypalSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsPaypalSource() (*PaypalSource, bool) {
+ return nil, false
+}
+
+// AsMarketoSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsMarketoSource() (*MarketoSource, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
+// AsMariaDBSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsMariaDBSource() (*MariaDBSource, bool) {
+ return nil, false
+}
+
+// AsMagentoSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsMagentoSource() (*MagentoSource, bool) {
+ return nil, false
+}
+
+// AsJiraSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsJiraSource() (*JiraSource, bool) {
+ return nil, false
+}
+
+// AsImpalaSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsImpalaSource() (*ImpalaSource, bool) {
+ return nil, false
+}
+
+// AsHubspotSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsHubspotSource() (*HubspotSource, bool) {
+ return nil, false
+}
+
+// AsHiveSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsHiveSource() (*HiveSource, bool) {
+ return nil, false
+}
+
+// AsHBaseSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsHBaseSource() (*HBaseSource, bool) {
+ return nil, false
+}
+
+// AsGreenplumSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsGreenplumSource() (*GreenplumSource, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQuerySource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsGoogleBigQuerySource() (*GoogleBigQuerySource, bool) {
+ return nil, false
+}
+
+// AsEloquaSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsEloquaSource() (*EloquaSource, bool) {
+ return nil, false
+}
+
+// AsDrillSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsDrillSource() (*DrillSource, bool) {
+ return nil, false
+}
+
+// AsCouchbaseSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsCouchbaseSource() (*CouchbaseSource, bool) {
+ return nil, false
+}
+
+// AsConcurSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsConcurSource() (*ConcurSource, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsAzurePostgreSQLSource() (*AzurePostgreSQLSource, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsAmazonMWSSource() (*AmazonMWSSource, bool) {
+ return nil, false
+}
+
+// AsHTTPSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsHTTPSource() (*HTTPSource, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsAzureBlobFSSource() (*AzureBlobFSSource, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsAzureDataLakeStoreSource() (*AzureDataLakeStoreSource, bool) {
+ return nil, false
+}
+
+// AsOffice365Source is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsOffice365Source() (*Office365Source, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPISource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsCosmosDbMongoDbAPISource() (*CosmosDbMongoDbAPISource, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2Source is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsMongoDbV2Source() (*MongoDbV2Source, bool) {
+ return nil, false
+}
+
+// AsMongoDbSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsMongoDbSource() (*MongoDbSource, bool) {
+ return nil, false
+}
+
+// AsCassandraSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsCassandraSource() (*CassandraSource, bool) {
+ return nil, false
+}
+
+// AsWebSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsWebSource() (*WebSource, bool) {
+ return nil, false
+}
+
+// AsTeradataSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsTeradataSource() (*TeradataSource, bool) {
+ return nil, false
+}
+
+// AsOracleSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsOracleSource() (*OracleSource, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsAzureDataExplorerSource() (*AzureDataExplorerSource, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsAzureMySQLSource() (*AzureMySQLSource, bool) {
+ return nil, false
+}
+
+// AsHdfsSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsHdfsSource() (*HdfsSource, bool) {
+ return nil, false
+}
+
+// AsFileSystemSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsFileSystemSource() (*FileSystemSource, bool) {
+ return nil, false
+}
+
+// AsSQLDWSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSQLDWSource() (*SQLDWSource, bool) {
+ return nil, false
+}
+
+// AsSQLMISource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSQLMISource() (*SQLMISource, bool) {
+ return nil, false
+}
+
+// AsAzureSQLSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsAzureSQLSource() (*AzureSQLSource, bool) {
+ return nil, false
+}
+
+// AsSQLServerSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSQLServerSource() (*SQLServerSource, bool) {
+ return nil, false
+}
+
+// AsSQLSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSQLSource() (*SQLSource, bool) {
+ return nil, false
+}
+
+// AsRestSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsRestSource() (*RestSource, bool) {
+ return nil, false
+}
+
+// AsSapTableSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSapTableSource() (*SapTableSource, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSapOpenHubSource() (*SapOpenHubSource, bool) {
+ return nil, false
+}
+
+// AsSapHanaSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSapHanaSource() (*SapHanaSource, bool) {
+ return nil, false
+}
+
+// AsSapEccSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSapEccSource() (*SapEccSource, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSapCloudForCustomerSource() (*SapCloudForCustomerSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSalesforceServiceCloudSource() (*SalesforceServiceCloudSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSalesforceSource() (*SalesforceSource, bool) {
+ return nil, false
+}
+
+// AsODataSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsODataSource() (*ODataSource, bool) {
+ return nil, false
+}
+
+// AsSapBwSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
+// AsSybaseSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsSybaseSource() (*SybaseSource, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsPostgreSQLSource() (*PostgreSQLSource, bool) {
+ return nil, false
+}
+
+// AsMySQLSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsMySQLSource() (*MySQLSource, bool) {
+ return nil, false
+}
+
+// AsOdbcSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsOdbcSource() (*OdbcSource, bool) {
+ return nil, false
+}
+
+// AsDb2Source is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsDb2Source() (*Db2Source, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsMicrosoftAccessSource() (*MicrosoftAccessSource, bool) {
+ return nil, false
+}
+
+// AsInformixSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsInformixSource() (*InformixSource, bool) {
+ return nil, false
+}
+
+// AsRelationalSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsRelationalSource() (*RelationalSource, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsCommonDataServiceForAppsSource() (*CommonDataServiceForAppsSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsDynamicsCrmSource() (*DynamicsCrmSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsDynamicsSource() (*DynamicsSource, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsDocumentDbCollectionSource() (*DocumentDbCollectionSource, bool) {
+ return nil, false
+}
+
+// AsBlobSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsBlobSource() (*BlobSource, bool) {
+ return nil, false
+}
+
+// AsAzureTableSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsAzureTableSource() (*AzureTableSource, bool) {
+ return nil, false
+}
+
+// AsBinarySource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsBinarySource() (*BinarySource, bool) {
+ return nil, false
+}
+
+// AsJSONSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
+// AsDelimitedTextSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
+ return nil, false
+}
+
+// AsParquetSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsParquetSource() (*ParquetSource, bool) {
+ return nil, false
+}
+
+// AsAvroSource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsAvroSource() (*AvroSource, bool) {
+ return &as, true
+}
+
+// AsCopySource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsCopySource() (*CopySource, bool) {
+ return nil, false
+}
+
+// AsBasicCopySource is the BasicCopySource implementation for AvroSource.
+func (as AvroSource) AsBasicCopySource() (BasicCopySource, bool) {
+ return &as, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for AvroSource struct.
+func (as *AvroSource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "storeSettings":
+ if v != nil {
+ var storeSettings StoreReadSettings
+ err = json.Unmarshal(*v, &storeSettings)
+ if err != nil {
+ return err
+ }
+ as.StoreSettings = &storeSettings
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if as.AdditionalProperties == nil {
+ as.AdditionalProperties = make(map[string]interface{})
+ }
+ as.AdditionalProperties[k] = additionalProperties
+ }
+ case "sourceRetryCount":
+ if v != nil {
+ var sourceRetryCount interface{}
+ err = json.Unmarshal(*v, &sourceRetryCount)
+ if err != nil {
+ return err
+ }
+ as.SourceRetryCount = sourceRetryCount
+ }
+ case "sourceRetryWait":
+ if v != nil {
+ var sourceRetryWait interface{}
+ err = json.Unmarshal(*v, &sourceRetryWait)
+ if err != nil {
+ return err
+ }
+ as.SourceRetryWait = sourceRetryWait
+ }
+ case "maxConcurrentConnections":
+ if v != nil {
+ var maxConcurrentConnections interface{}
+ err = json.Unmarshal(*v, &maxConcurrentConnections)
+ if err != nil {
+ return err
+ }
+ as.MaxConcurrentConnections = maxConcurrentConnections
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicCopySource
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ as.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// AvroWriteSettings Avro write settings.
+type AvroWriteSettings struct {
+ // RecordName - Top-level record name in the write result, which is required by the Avro spec.
+ RecordName *string `json:"recordName,omitempty"`
+ // RecordNamespace - Record namespace in the write result.
+ RecordNamespace *string `json:"recordNamespace,omitempty"`
+ // AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Type - The write setting type.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AvroWriteSettings.
+func (aws AvroWriteSettings) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if aws.RecordName != nil {
+ objectMap["recordName"] = aws.RecordName
+ }
+ if aws.RecordNamespace != nil {
+ objectMap["recordNamespace"] = aws.RecordNamespace
+ }
+ if aws.Type != nil {
+ objectMap["type"] = aws.Type
+ }
+ for k, v := range aws.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AvroWriteSettings struct.
+func (aws *AvroWriteSettings) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "recordName":
+ if v != nil {
+ var recordName string
+ err = json.Unmarshal(*v, &recordName)
+ if err != nil {
+ return err
+ }
+ aws.RecordName = &recordName
+ }
+ case "recordNamespace":
+ if v != nil {
+ var recordNamespace string
+ err = json.Unmarshal(*v, &recordNamespace)
+ if err != nil {
+ return err
+ }
+ aws.RecordNamespace = &recordNamespace
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if aws.AdditionalProperties == nil {
+ aws.AdditionalProperties = make(map[string]interface{})
+ }
+ aws.AdditionalProperties[k] = additionalProperties
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ aws.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
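
A small round-trip sketch (again, not part of the generated code) showing where unmatched keys land when the `AvroWriteSettings` unmarshaler above runs. The `customFlag` key is arbitrary and hypothetical, and the import path is assumed as before.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	raw := []byte(`{"type":"AvroWriteSettings","recordName":"root","customFlag":true}`)

	var ws datafactory.AvroWriteSettings
	if err := json.Unmarshal(raw, &ws); err != nil {
		panic(err)
	}

	// Known keys land on the typed fields; anything else is kept in AdditionalProperties.
	fmt.Println(*ws.RecordName)                        // "root"
	fmt.Println(ws.AdditionalProperties["customFlag"]) // true
}
```
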
// AzureBatchLinkedService azure Batch linked service.
type AzureBatchLinkedService struct {
// AzureBatchLinkedServiceTypeProperties - Azure Batch linked service properties.
@@ -8148,7 +10557,7 @@ type AzureBatchLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -8300,6 +10709,11 @@ func (abls AzureBatchLinkedService) AsMarketoLinkedService() (*MarketoLinkedServ
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureBatchLinkedService.
+func (abls AzureBatchLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureBatchLinkedService.
func (abls AzureBatchLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -8825,7 +11239,7 @@ type AzureBlobDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -8961,6 +11375,11 @@ func (abd AzureBlobDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, boo
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzureBlobDataset.
+func (abd AzureBlobDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AzureBlobDataset.
func (abd AzureBlobDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -9086,6 +11505,16 @@ func (abd AzureBlobDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudFor
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AzureBlobDataset.
+func (abd AzureBlobDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzureBlobDataset.
+func (abd AzureBlobDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzureBlobDataset.
func (abd AzureBlobDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -9126,6 +11555,16 @@ func (abd AzureBlobDataset) AsRelationalTableDataset() (*RelationalTableDataset,
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AzureBlobDataset.
+func (abd AzureBlobDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzureBlobDataset.
+func (abd AzureBlobDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AzureBlobDataset.
func (abd AzureBlobDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -9241,6 +11680,11 @@ func (abd AzureBlobDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AzureBlobDataset.
+func (abd AzureBlobDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AzureBlobDataset.
func (abd AzureBlobDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -9251,6 +11695,11 @@ func (abd AzureBlobDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AzureBlobDataset.
+func (abd AzureBlobDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AzureBlobDataset.
func (abd AzureBlobDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -9488,7 +11937,7 @@ type AzureBlobFSDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
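
The As*Dataset stubs added throughout the hunks below follow the SDK's generated downcast pattern: every concrete dataset type answers every As* method, returning (nil, false) except for its own type. A minimal usage sketch (an editor's illustration, not part of the generated diff), assuming the datafactory models package lives at its usual import path and that the BasicDataset interface, like the other generated polymorphic interfaces, declares these As* helpers:

```go
// Sketch only. Assumes the generated datafactory models package and that
// BasicDataset exposes the As* helpers added in the hunks below.
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// describe narrows a polymorphic BasicDataset value: each As* helper returns
// (nil, false) unless the receiver is that concrete type.
func describe(ds datafactory.BasicDataset) string {
	if _, ok := ds.AsAzureMariaDBTableDataset(); ok {
		return "Azure Database for MariaDB table dataset"
	}
	if _, ok := ds.AsJSONDataset(); ok {
		return "JSON dataset"
	}
	if _, ok := ds.AsAvroDataset(); ok {
		return "Avro dataset"
	}
	return "some other dataset type"
}

func main() {
	fmt.Println(describe(datafactory.AzureMariaDBTableDataset{}))
}
```

The same pattern drives the new AsAzureMariaDBLinkedService, AsSapBwCubeDataset, AsAvroSink, AsJSONSource and related stubs added to the linked-service, sink, and source types in the hunks that follow.
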
@@ -9624,6 +12073,11 @@ func (abfd AzureBlobFSDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzureBlobFSDataset.
+func (abfd AzureBlobFSDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AzureBlobFSDataset.
func (abfd AzureBlobFSDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -9749,6 +12203,16 @@ func (abfd AzureBlobFSDataset) AsSapCloudForCustomerResourceDataset() (*SapCloud
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AzureBlobFSDataset.
+func (abfd AzureBlobFSDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzureBlobFSDataset.
+func (abfd AzureBlobFSDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzureBlobFSDataset.
func (abfd AzureBlobFSDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -9789,6 +12253,16 @@ func (abfd AzureBlobFSDataset) AsRelationalTableDataset() (*RelationalTableDatas
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AzureBlobFSDataset.
+func (abfd AzureBlobFSDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzureBlobFSDataset.
+func (abfd AzureBlobFSDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AzureBlobFSDataset.
func (abfd AzureBlobFSDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -9904,6 +12378,11 @@ func (abfd AzureBlobFSDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AzureBlobFSDataset.
+func (abfd AzureBlobFSDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AzureBlobFSDataset.
func (abfd AzureBlobFSDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -9914,6 +12393,11 @@ func (abfd AzureBlobFSDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AzureBlobFSDataset.
+func (abfd AzureBlobFSDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AzureBlobFSDataset.
func (abfd AzureBlobFSDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -10112,7 +12596,7 @@ type AzureBlobFSLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -10264,6 +12748,11 @@ func (abfls AzureBlobFSLinkedService) AsMarketoLinkedService() (*MarketoLinkedSe
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureBlobFSLinkedService.
+func (abfls AzureBlobFSLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureBlobFSLinkedService.
func (abfls AzureBlobFSLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -11037,7 +13526,7 @@ type AzureBlobFSSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -11192,6 +13681,11 @@ func (abfs AzureBlobFSSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for AzureBlobFSSink.
+func (abfs AzureBlobFSSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for AzureBlobFSSink.
func (abfs AzureBlobFSSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -11207,11 +13701,21 @@ func (abfs AzureBlobFSSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSin
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for AzureBlobFSSink.
+func (abfs AzureBlobFSSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for AzureBlobFSSink.
func (abfs AzureBlobFSSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for AzureBlobFSSink.
+func (abfs AzureBlobFSSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for AzureBlobFSSink.
func (abfs AzureBlobFSSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -11333,7 +13837,7 @@ type AzureBlobFSSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -11463,6 +13967,11 @@ func (abfs AzureBlobFSSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for AzureBlobFSSource.
+func (abfs AzureBlobFSSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for AzureBlobFSSource.
func (abfs AzureBlobFSSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -11683,6 +14192,11 @@ func (abfs AzureBlobFSSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for AzureBlobFSSource.
+func (abfs AzureBlobFSSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for AzureBlobFSSource.
func (abfs AzureBlobFSSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -11758,6 +14272,11 @@ func (abfs AzureBlobFSSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for AzureBlobFSSource.
+func (abfs AzureBlobFSSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for AzureBlobFSSource.
func (abfs AzureBlobFSSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -11768,6 +14287,11 @@ func (abfs AzureBlobFSSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for AzureBlobFSSource.
+func (abfs AzureBlobFSSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for AzureBlobFSSource.
func (abfs AzureBlobFSSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -11870,21 +14394,24 @@ func (abfs *AzureBlobFSSource) UnmarshalJSON(body []byte) error {
// AzureBlobFSWriteSettings azure blobFS write settings.
type AzureBlobFSWriteSettings struct {
+	// BlockSizeInMB - Indicates the block size (MB) when writing data to the blob. Type: integer (or Expression with resultType integer).
+ BlockSizeInMB interface{} `json:"blockSizeInMB,omitempty"`
	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
- // Type - The write setting type.
- Type *string `json:"type,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
// CopyBehavior - The type of copy behavior for copy sink.
CopyBehavior interface{} `json:"copyBehavior,omitempty"`
+ // Type - Possible values include: 'TypeStoreWriteSettings', 'TypeFileServerWriteSettings', 'TypeAzureDataLakeStoreWriteSettings', 'TypeAzureBlobFSWriteSettings', 'TypeAzureBlobStorageWriteSettings'
+ Type TypeBasicStoreWriteSettings `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for AzureBlobFSWriteSettings.
func (abfws AzureBlobFSWriteSettings) MarshalJSON() ([]byte, error) {
+ abfws.Type = TypeAzureBlobFSWriteSettings
objectMap := make(map[string]interface{})
- if abfws.Type != nil {
- objectMap["type"] = abfws.Type
+ if abfws.BlockSizeInMB != nil {
+ objectMap["blockSizeInMB"] = abfws.BlockSizeInMB
}
if abfws.MaxConcurrentConnections != nil {
objectMap["maxConcurrentConnections"] = abfws.MaxConcurrentConnections
@@ -11892,12 +14419,45 @@ func (abfws AzureBlobFSWriteSettings) MarshalJSON() ([]byte, error) {
if abfws.CopyBehavior != nil {
objectMap["copyBehavior"] = abfws.CopyBehavior
}
+ if abfws.Type != "" {
+ objectMap["type"] = abfws.Type
+ }
for k, v := range abfws.AdditionalProperties {
objectMap[k] = v
}
return json.Marshal(objectMap)
}
+// AsFileServerWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobFSWriteSettings.
+func (abfws AzureBlobFSWriteSettings) AsFileServerWriteSettings() (*FileServerWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobFSWriteSettings.
+func (abfws AzureBlobFSWriteSettings) AsAzureDataLakeStoreWriteSettings() (*AzureDataLakeStoreWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobFSWriteSettings.
+func (abfws AzureBlobFSWriteSettings) AsAzureBlobFSWriteSettings() (*AzureBlobFSWriteSettings, bool) {
+ return &abfws, true
+}
+
+// AsAzureBlobStorageWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobFSWriteSettings.
+func (abfws AzureBlobFSWriteSettings) AsAzureBlobStorageWriteSettings() (*AzureBlobStorageWriteSettings, bool) {
+ return nil, false
+}
+
+// AsStoreWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobFSWriteSettings.
+func (abfws AzureBlobFSWriteSettings) AsStoreWriteSettings() (*StoreWriteSettings, bool) {
+ return nil, false
+}
+
+// AsBasicStoreWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobFSWriteSettings.
+func (abfws AzureBlobFSWriteSettings) AsBasicStoreWriteSettings() (BasicStoreWriteSettings, bool) {
+ return &abfws, true
+}
+
// UnmarshalJSON is the custom unmarshaler for AzureBlobFSWriteSettings struct.
func (abfws *AzureBlobFSWriteSettings) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
@@ -11907,6 +14467,15 @@ func (abfws *AzureBlobFSWriteSettings) UnmarshalJSON(body []byte) error {
}
for k, v := range m {
switch k {
+ case "blockSizeInMB":
+ if v != nil {
+ var blockSizeInMB interface{}
+ err = json.Unmarshal(*v, &blockSizeInMB)
+ if err != nil {
+ return err
+ }
+ abfws.BlockSizeInMB = blockSizeInMB
+ }
default:
if v != nil {
var additionalProperties interface{}
@@ -11919,15 +14488,6 @@ func (abfws *AzureBlobFSWriteSettings) UnmarshalJSON(body []byte) error {
}
abfws.AdditionalProperties[k] = additionalProperties
}
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- abfws.Type = &typeVar
- }
case "maxConcurrentConnections":
if v != nil {
var maxConcurrentConnections interface{}
@@ -11946,6 +14506,15 @@ func (abfws *AzureBlobFSWriteSettings) UnmarshalJSON(body []byte) error {
}
abfws.CopyBehavior = copyBehavior
}
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicStoreWriteSettings
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ abfws.Type = typeVar
+ }
}
}
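
The AzureBlobFSWriteSettings change above (and the matching AzureBlobStorageWriteSettings change below) is behavioral rather than purely additive: Type moves from a free-form *string to the TypeBasicStoreWriteSettings discriminator, MarshalJSON now stamps that discriminator itself, and the new BlockSizeInMB property round-trips through the custom (un)marshalers. A minimal sketch of the resulting wire shape (an editor's illustration, not part of the generated diff), using the same assumed import path as the sketch above and only the identifiers introduced in these hunks:

```go
// Sketch only. Field and constant names are the ones shown in this diff.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	ws := datafactory.AzureBlobFSWriteSettings{
		BlockSizeInMB:            8, // new property in this version
		MaxConcurrentConnections: 4,
		CopyBehavior:             "PreserveHierarchy",
	}

	// MarshalJSON overwrites ws.Type with TypeAzureBlobFSWriteSettings, so
	// callers no longer assign the old free-form *string type field.
	b, err := json.Marshal(ws)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```

Because MarshalJSON sets the discriminator and UnmarshalJSON now parses "type" into the TypeBasicStoreWriteSettings enum, code that previously populated the old *string Type field can simply stop doing so.
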
@@ -11966,7 +14535,7 @@ type AzureBlobStorageLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -12118,6 +14687,11 @@ func (absls AzureBlobStorageLinkedService) AsMarketoLinkedService() (*MarketoLin
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureBlobStorageLinkedService.
+func (absls AzureBlobStorageLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureBlobStorageLinkedService.
func (absls AzureBlobStorageLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -12910,21 +15484,24 @@ func (absrs *AzureBlobStorageReadSettings) UnmarshalJSON(body []byte) error {
// AzureBlobStorageWriteSettings azure blob write settings.
type AzureBlobStorageWriteSettings struct {
+	// BlockSizeInMB - Indicates the block size (MB) when writing data to the blob. Type: integer (or Expression with resultType integer).
+ BlockSizeInMB interface{} `json:"blockSizeInMB,omitempty"`
	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
- // Type - The write setting type.
- Type *string `json:"type,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
// CopyBehavior - The type of copy behavior for copy sink.
CopyBehavior interface{} `json:"copyBehavior,omitempty"`
+ // Type - Possible values include: 'TypeStoreWriteSettings', 'TypeFileServerWriteSettings', 'TypeAzureDataLakeStoreWriteSettings', 'TypeAzureBlobFSWriteSettings', 'TypeAzureBlobStorageWriteSettings'
+ Type TypeBasicStoreWriteSettings `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for AzureBlobStorageWriteSettings.
func (absws AzureBlobStorageWriteSettings) MarshalJSON() ([]byte, error) {
+ absws.Type = TypeAzureBlobStorageWriteSettings
objectMap := make(map[string]interface{})
- if absws.Type != nil {
- objectMap["type"] = absws.Type
+ if absws.BlockSizeInMB != nil {
+ objectMap["blockSizeInMB"] = absws.BlockSizeInMB
}
if absws.MaxConcurrentConnections != nil {
objectMap["maxConcurrentConnections"] = absws.MaxConcurrentConnections
@@ -12932,12 +15509,45 @@ func (absws AzureBlobStorageWriteSettings) MarshalJSON() ([]byte, error) {
if absws.CopyBehavior != nil {
objectMap["copyBehavior"] = absws.CopyBehavior
}
+ if absws.Type != "" {
+ objectMap["type"] = absws.Type
+ }
for k, v := range absws.AdditionalProperties {
objectMap[k] = v
}
return json.Marshal(objectMap)
}
+// AsFileServerWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobStorageWriteSettings.
+func (absws AzureBlobStorageWriteSettings) AsFileServerWriteSettings() (*FileServerWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobStorageWriteSettings.
+func (absws AzureBlobStorageWriteSettings) AsAzureDataLakeStoreWriteSettings() (*AzureDataLakeStoreWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobStorageWriteSettings.
+func (absws AzureBlobStorageWriteSettings) AsAzureBlobFSWriteSettings() (*AzureBlobFSWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureBlobStorageWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobStorageWriteSettings.
+func (absws AzureBlobStorageWriteSettings) AsAzureBlobStorageWriteSettings() (*AzureBlobStorageWriteSettings, bool) {
+ return &absws, true
+}
+
+// AsStoreWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobStorageWriteSettings.
+func (absws AzureBlobStorageWriteSettings) AsStoreWriteSettings() (*StoreWriteSettings, bool) {
+ return nil, false
+}
+
+// AsBasicStoreWriteSettings is the BasicStoreWriteSettings implementation for AzureBlobStorageWriteSettings.
+func (absws AzureBlobStorageWriteSettings) AsBasicStoreWriteSettings() (BasicStoreWriteSettings, bool) {
+ return &absws, true
+}
+
// UnmarshalJSON is the custom unmarshaler for AzureBlobStorageWriteSettings struct.
func (absws *AzureBlobStorageWriteSettings) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
@@ -12947,6 +15557,15 @@ func (absws *AzureBlobStorageWriteSettings) UnmarshalJSON(body []byte) error {
}
for k, v := range m {
switch k {
+ case "blockSizeInMB":
+ if v != nil {
+ var blockSizeInMB interface{}
+ err = json.Unmarshal(*v, &blockSizeInMB)
+ if err != nil {
+ return err
+ }
+ absws.BlockSizeInMB = blockSizeInMB
+ }
default:
if v != nil {
var additionalProperties interface{}
@@ -12959,15 +15578,6 @@ func (absws *AzureBlobStorageWriteSettings) UnmarshalJSON(body []byte) error {
}
absws.AdditionalProperties[k] = additionalProperties
}
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- absws.Type = &typeVar
- }
case "maxConcurrentConnections":
if v != nil {
var maxConcurrentConnections interface{}
@@ -12986,6 +15596,15 @@ func (absws *AzureBlobStorageWriteSettings) UnmarshalJSON(body []byte) error {
}
absws.CopyBehavior = copyBehavior
}
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicStoreWriteSettings
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ absws.Type = typeVar
+ }
}
}
@@ -13006,7 +15625,7 @@ type AzureDatabricksLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -13158,6 +15777,11 @@ func (adls AzureDatabricksLinkedService) AsMarketoLinkedService() (*MarketoLinke
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureDatabricksLinkedService.
+func (adls AzureDatabricksLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureDatabricksLinkedService.
func (adls AzureDatabricksLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -14151,7 +16775,7 @@ type AzureDataExplorerLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -14303,6 +16927,11 @@ func (adels AzureDataExplorerLinkedService) AsMarketoLinkedService() (*MarketoLi
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureDataExplorerLinkedService.
+func (adels AzureDataExplorerLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureDataExplorerLinkedService.
func (adels AzureDataExplorerLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -14817,7 +17446,7 @@ type AzureDataExplorerSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -14978,6 +17607,11 @@ func (ades AzureDataExplorerSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for AzureDataExplorerSink.
+func (ades AzureDataExplorerSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for AzureDataExplorerSink.
func (ades AzureDataExplorerSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -14993,11 +17627,21 @@ func (ades AzureDataExplorerSink) AsSapCloudForCustomerSink() (*SapCloudForCusto
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for AzureDataExplorerSink.
+func (ades AzureDataExplorerSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for AzureDataExplorerSink.
func (ades AzureDataExplorerSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for AzureDataExplorerSink.
+func (ades AzureDataExplorerSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for AzureDataExplorerSink.
func (ades AzureDataExplorerSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -15137,7 +17781,7 @@ type AzureDataExplorerSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -15267,6 +17911,11 @@ func (ades AzureDataExplorerSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for AzureDataExplorerSource.
+func (ades AzureDataExplorerSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for AzureDataExplorerSource.
func (ades AzureDataExplorerSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -15487,6 +18136,11 @@ func (ades AzureDataExplorerSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for AzureDataExplorerSource.
+func (ades AzureDataExplorerSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for AzureDataExplorerSource.
func (ades AzureDataExplorerSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -15562,6 +18216,11 @@ func (ades AzureDataExplorerSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for AzureDataExplorerSource.
+func (ades AzureDataExplorerSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for AzureDataExplorerSource.
func (ades AzureDataExplorerSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -15572,6 +18231,11 @@ func (ades AzureDataExplorerSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for AzureDataExplorerSource.
+func (ades AzureDataExplorerSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for AzureDataExplorerSource.
func (ades AzureDataExplorerSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -15692,7 +18356,7 @@ type AzureDataExplorerTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -15828,6 +18492,11 @@ func (adetd AzureDataExplorerTableDataset) AsMarketoObjectDataset() (*MarketoObj
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
+func (adetd AzureDataExplorerTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
func (adetd AzureDataExplorerTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -15953,6 +18622,16 @@ func (adetd AzureDataExplorerTableDataset) AsSapCloudForCustomerResourceDataset(
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
+func (adetd AzureDataExplorerTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
+func (adetd AzureDataExplorerTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
func (adetd AzureDataExplorerTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -15993,6 +18672,16 @@ func (adetd AzureDataExplorerTableDataset) AsRelationalTableDataset() (*Relation
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
+func (adetd AzureDataExplorerTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
+func (adetd AzureDataExplorerTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
func (adetd AzureDataExplorerTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -16108,6 +18797,11 @@ func (adetd AzureDataExplorerTableDataset) AsBinaryDataset() (*BinaryDataset, bo
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
+func (adetd AzureDataExplorerTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
func (adetd AzureDataExplorerTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -16118,6 +18812,11 @@ func (adetd AzureDataExplorerTableDataset) AsParquetDataset() (*ParquetDataset,
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
+func (adetd AzureDataExplorerTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AzureDataExplorerTableDataset.
func (adetd AzureDataExplorerTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -16255,7 +18954,7 @@ type AzureDataLakeAnalyticsLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -16407,6 +19106,11 @@ func (adlals AzureDataLakeAnalyticsLinkedService) AsMarketoLinkedService() (*Mar
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureDataLakeAnalyticsLinkedService.
+func (adlals AzureDataLakeAnalyticsLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureDataLakeAnalyticsLinkedService.
func (adlals AzureDataLakeAnalyticsLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -16954,7 +19658,7 @@ type AzureDataLakeStoreDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -17090,6 +19794,11 @@ func (adlsd AzureDataLakeStoreDataset) AsMarketoObjectDataset() (*MarketoObjectD
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
+func (adlsd AzureDataLakeStoreDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
func (adlsd AzureDataLakeStoreDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -17215,6 +19924,16 @@ func (adlsd AzureDataLakeStoreDataset) AsSapCloudForCustomerResourceDataset() (*
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
+func (adlsd AzureDataLakeStoreDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
+func (adlsd AzureDataLakeStoreDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
func (adlsd AzureDataLakeStoreDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -17255,6 +19974,16 @@ func (adlsd AzureDataLakeStoreDataset) AsRelationalTableDataset() (*RelationalTa
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
+func (adlsd AzureDataLakeStoreDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
+func (adlsd AzureDataLakeStoreDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
func (adlsd AzureDataLakeStoreDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -17370,6 +20099,11 @@ func (adlsd AzureDataLakeStoreDataset) AsBinaryDataset() (*BinaryDataset, bool)
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
+func (adlsd AzureDataLakeStoreDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
func (adlsd AzureDataLakeStoreDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -17380,6 +20114,11 @@ func (adlsd AzureDataLakeStoreDataset) AsParquetDataset() (*ParquetDataset, bool
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
+func (adlsd AzureDataLakeStoreDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AzureDataLakeStoreDataset.
func (adlsd AzureDataLakeStoreDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -17578,7 +20317,7 @@ type AzureDataLakeStoreLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -17730,6 +20469,11 @@ func (adlsls AzureDataLakeStoreLinkedService) AsMarketoLinkedService() (*Marketo
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureDataLakeStoreLinkedService.
+func (adlsls AzureDataLakeStoreLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureDataLakeStoreLinkedService.
func (adlsls AzureDataLakeStoreLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -18513,7 +21257,7 @@ type AzureDataLakeStoreSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -18671,6 +21415,11 @@ func (adlss AzureDataLakeStoreSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for AzureDataLakeStoreSink.
+func (adlss AzureDataLakeStoreSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for AzureDataLakeStoreSink.
func (adlss AzureDataLakeStoreSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -18686,11 +21435,21 @@ func (adlss AzureDataLakeStoreSink) AsSapCloudForCustomerSink() (*SapCloudForCus
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for AzureDataLakeStoreSink.
+func (adlss AzureDataLakeStoreSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for AzureDataLakeStoreSink.
func (adlss AzureDataLakeStoreSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for AzureDataLakeStoreSink.
+func (adlss AzureDataLakeStoreSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for AzureDataLakeStoreSink.
func (adlss AzureDataLakeStoreSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -18817,7 +21576,7 @@ type AzureDataLakeStoreSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -18941,6 +21700,11 @@ func (adlss AzureDataLakeStoreSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for AzureDataLakeStoreSource.
+func (adlss AzureDataLakeStoreSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for AzureDataLakeStoreSource.
func (adlss AzureDataLakeStoreSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -19161,6 +21925,11 @@ func (adlss AzureDataLakeStoreSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for AzureDataLakeStoreSource.
+func (adlss AzureDataLakeStoreSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for AzureDataLakeStoreSource.
func (adlss AzureDataLakeStoreSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -19236,6 +22005,11 @@ func (adlss AzureDataLakeStoreSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for AzureDataLakeStoreSource.
+func (adlss AzureDataLakeStoreSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for AzureDataLakeStoreSource.
func (adlss AzureDataLakeStoreSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -19246,6 +22020,11 @@ func (adlss AzureDataLakeStoreSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for AzureDataLakeStoreSource.
+func (adlss AzureDataLakeStoreSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for AzureDataLakeStoreSource.
func (adlss AzureDataLakeStoreSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -19332,32 +22111,63 @@ func (adlss *AzureDataLakeStoreSource) UnmarshalJSON(body []byte) error {
type AzureDataLakeStoreWriteSettings struct {
	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
- // Type - The write setting type.
- Type *string `json:"type,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
// CopyBehavior - The type of copy behavior for copy sink.
CopyBehavior interface{} `json:"copyBehavior,omitempty"`
+ // Type - Possible values include: 'TypeStoreWriteSettings', 'TypeFileServerWriteSettings', 'TypeAzureDataLakeStoreWriteSettings', 'TypeAzureBlobFSWriteSettings', 'TypeAzureBlobStorageWriteSettings'
+ Type TypeBasicStoreWriteSettings `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for AzureDataLakeStoreWriteSettings.
func (adlsws AzureDataLakeStoreWriteSettings) MarshalJSON() ([]byte, error) {
+ adlsws.Type = TypeAzureDataLakeStoreWriteSettings
objectMap := make(map[string]interface{})
- if adlsws.Type != nil {
- objectMap["type"] = adlsws.Type
- }
if adlsws.MaxConcurrentConnections != nil {
objectMap["maxConcurrentConnections"] = adlsws.MaxConcurrentConnections
}
if adlsws.CopyBehavior != nil {
objectMap["copyBehavior"] = adlsws.CopyBehavior
}
+ if adlsws.Type != "" {
+ objectMap["type"] = adlsws.Type
+ }
for k, v := range adlsws.AdditionalProperties {
objectMap[k] = v
}
return json.Marshal(objectMap)
}
+// AsFileServerWriteSettings is the BasicStoreWriteSettings implementation for AzureDataLakeStoreWriteSettings.
+func (adlsws AzureDataLakeStoreWriteSettings) AsFileServerWriteSettings() (*FileServerWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreWriteSettings is the BasicStoreWriteSettings implementation for AzureDataLakeStoreWriteSettings.
+func (adlsws AzureDataLakeStoreWriteSettings) AsAzureDataLakeStoreWriteSettings() (*AzureDataLakeStoreWriteSettings, bool) {
+ return &adlsws, true
+}
+
+// AsAzureBlobFSWriteSettings is the BasicStoreWriteSettings implementation for AzureDataLakeStoreWriteSettings.
+func (adlsws AzureDataLakeStoreWriteSettings) AsAzureBlobFSWriteSettings() (*AzureBlobFSWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureBlobStorageWriteSettings is the BasicStoreWriteSettings implementation for AzureDataLakeStoreWriteSettings.
+func (adlsws AzureDataLakeStoreWriteSettings) AsAzureBlobStorageWriteSettings() (*AzureBlobStorageWriteSettings, bool) {
+ return nil, false
+}
+
+// AsStoreWriteSettings is the BasicStoreWriteSettings implementation for AzureDataLakeStoreWriteSettings.
+func (adlsws AzureDataLakeStoreWriteSettings) AsStoreWriteSettings() (*StoreWriteSettings, bool) {
+ return nil, false
+}
+
+// AsBasicStoreWriteSettings is the BasicStoreWriteSettings implementation for AzureDataLakeStoreWriteSettings.
+func (adlsws AzureDataLakeStoreWriteSettings) AsBasicStoreWriteSettings() (BasicStoreWriteSettings, bool) {
+ return &adlsws, true
+}
+
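
With this change AzureDataLakeStoreWriteSettings joins the BasicStoreWriteSettings hierarchy, so it gains the generated double-dispatch helpers above: only the matching As* conversion answers true, and AsBasicStoreWriteSettings always hands back the interface view. A minimal sketch of that convention, assuming the usual azure-sdk-for-go import path for this generated package (the path is my assumption, not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	ws := datafactory.AzureDataLakeStoreWriteSettings{}

	// Conversions to other concrete settings types report false...
	if _, ok := ws.AsFileServerWriteSettings(); !ok {
		fmt.Println("not FileServerWriteSettings")
	}
	// ...the matching helper returns the value itself...
	if same, ok := ws.AsAzureDataLakeStoreWriteSettings(); ok {
		fmt.Println(same != nil) // true
	}
	// ...and AsBasicStoreWriteSettings always recovers the interface form.
	basic, _ := ws.AsBasicStoreWriteSettings()
	fmt.Printf("%T\n", basic)
}
```
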
// UnmarshalJSON is the custom unmarshaler for AzureDataLakeStoreWriteSettings struct.
func (adlsws *AzureDataLakeStoreWriteSettings) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
@@ -19379,15 +22189,6 @@ func (adlsws *AzureDataLakeStoreWriteSettings) UnmarshalJSON(body []byte) error
}
adlsws.AdditionalProperties[k] = additionalProperties
}
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- adlsws.Type = &typeVar
- }
case "maxConcurrentConnections":
if v != nil {
var maxConcurrentConnections interface{}
@@ -19406,6 +22207,15 @@ func (adlsws *AzureDataLakeStoreWriteSettings) UnmarshalJSON(body []byte) error
}
adlsws.CopyBehavior = copyBehavior
}
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicStoreWriteSettings
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ adlsws.Type = typeVar
+ }
}
}
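
The hunk above is the substantive break for this model: Type moves from a free-form *string to the typed TypeBasicStoreWriteSettings discriminator, and the custom marshaler/unmarshaler now stamp and parse that constant instead of copying a caller-supplied string. A minimal round-trip sketch of the new shape, under the same import-path assumption as the previous example:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// CopyBehavior is interface{} in the generated model; any JSON-serializable value works.
	ws := datafactory.AzureDataLakeStoreWriteSettings{CopyBehavior: "PreserveHierarchy"}

	// MarshalJSON stamps the typed discriminator, so the payload carries "type" automatically.
	payload, err := json.Marshal(ws)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))

	// UnmarshalJSON reads the discriminator back into the typed field.
	var decoded datafactory.AzureDataLakeStoreWriteSettings
	if err := json.Unmarshal(payload, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Type) // typed discriminator rather than a *string
}
```
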
@@ -19778,7 +22588,7 @@ type AzureFunctionLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -19930,6 +22740,11 @@ func (afls AzureFunctionLinkedService) AsMarketoLinkedService() (*MarketoLinkedS
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureFunctionLinkedService.
+func (afls AzureFunctionLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureFunctionLinkedService.
func (afls AzureFunctionLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -20416,7 +23231,7 @@ type AzureKeyVaultLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -20568,6 +23383,11 @@ func (akvls AzureKeyVaultLinkedService) AsMarketoLinkedService() (*MarketoLinked
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureKeyVaultLinkedService.
+func (akvls AzureKeyVaultLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureKeyVaultLinkedService.
func (akvls AzureKeyVaultLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -21046,6 +23866,1755 @@ func (akvsr AzureKeyVaultSecretReference) AsBasicSecretBase() (BasicSecretBase,
return &akvsr, true
}
+// AzureMariaDBLinkedService Azure Database for MariaDB linked service.
+type AzureMariaDBLinkedService struct {
+ // AzureMariaDBLinkedServiceTypeProperties - Azure Database for MariaDB linked service properties.
+ *AzureMariaDBLinkedServiceTypeProperties `json:"typeProperties,omitempty"`
+	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // ConnectVia - The integration runtime reference.
+ ConnectVia *IntegrationRuntimeReference `json:"connectVia,omitempty"`
+ // Description - Linked service description.
+ Description *string `json:"description,omitempty"`
+ // Parameters - Parameters for linked service.
+ Parameters map[string]*ParameterSpecification `json:"parameters"`
+ // Annotations - List of tags that can be used for describing the linked service.
+ Annotations *[]interface{} `json:"annotations,omitempty"`
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ Type TypeBasicLinkedService `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) MarshalJSON() ([]byte, error) {
+ amdls.Type = TypeAzureMariaDB
+ objectMap := make(map[string]interface{})
+ if amdls.AzureMariaDBLinkedServiceTypeProperties != nil {
+ objectMap["typeProperties"] = amdls.AzureMariaDBLinkedServiceTypeProperties
+ }
+ if amdls.ConnectVia != nil {
+ objectMap["connectVia"] = amdls.ConnectVia
+ }
+ if amdls.Description != nil {
+ objectMap["description"] = amdls.Description
+ }
+ if amdls.Parameters != nil {
+ objectMap["parameters"] = amdls.Parameters
+ }
+ if amdls.Annotations != nil {
+ objectMap["annotations"] = amdls.Annotations
+ }
+ if amdls.Type != "" {
+ objectMap["type"] = amdls.Type
+ }
+ for k, v := range amdls.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
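+
Every newly added discriminated type in this diff repeats the same marshal-and-convert pattern, so one usage sketch covers them all; it relies on the AsAzureMariaDBLinkedService helpers generated below and again assumes the usual import path for this package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	desc := "Azure Database for MariaDB"
	ls := datafactory.AzureMariaDBLinkedService{Description: &desc}

	// MarshalJSON stamps Type with the new TypeAzureMariaDB constant,
	// so the wire payload carries the discriminator automatically.
	payload, err := json.Marshal(ls)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))

	// Callers holding the BasicLinkedService interface use the generated
	// As* helpers to recover the concrete type.
	var basic datafactory.BasicLinkedService = ls
	if mariadb, ok := basic.AsAzureMariaDBLinkedService(); ok {
		fmt.Println(*mariadb.Description)
	}
}
```
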
+
+// AsAzureFunctionLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureFunctionLinkedService() (*AzureFunctionLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureDataExplorerLinkedService() (*AzureDataExplorerLinkedService, bool) {
+ return nil, false
+}
+
+// AsSapTableLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSapTableLinkedService() (*SapTableLinkedService, bool) {
+ return nil, false
+}
+
+// AsGoogleAdWordsLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsGoogleAdWordsLinkedService() (*GoogleAdWordsLinkedService, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsOracleServiceCloudLinkedService() (*OracleServiceCloudLinkedService, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsDynamicsAXLinkedService() (*DynamicsAXLinkedService, bool) {
+ return nil, false
+}
+
+// AsResponsysLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsResponsysLinkedService() (*ResponsysLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureDatabricksLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureDatabricksLinkedService() (*AzureDatabricksLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeAnalyticsLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureDataLakeAnalyticsLinkedService() (*AzureDataLakeAnalyticsLinkedService, bool) {
+ return nil, false
+}
+
+// AsHDInsightOnDemandLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsHDInsightOnDemandLinkedService() (*HDInsightOnDemandLinkedService, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSalesforceMarketingCloudLinkedService() (*SalesforceMarketingCloudLinkedService, bool) {
+ return nil, false
+}
+
+// AsNetezzaLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsNetezzaLinkedService() (*NetezzaLinkedService, bool) {
+ return nil, false
+}
+
+// AsVerticaLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsVerticaLinkedService() (*VerticaLinkedService, bool) {
+ return nil, false
+}
+
+// AsZohoLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsZohoLinkedService() (*ZohoLinkedService, bool) {
+ return nil, false
+}
+
+// AsXeroLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsXeroLinkedService() (*XeroLinkedService, bool) {
+ return nil, false
+}
+
+// AsSquareLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSquareLinkedService() (*SquareLinkedService, bool) {
+ return nil, false
+}
+
+// AsSparkLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSparkLinkedService() (*SparkLinkedService, bool) {
+ return nil, false
+}
+
+// AsShopifyLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsShopifyLinkedService() (*ShopifyLinkedService, bool) {
+ return nil, false
+}
+
+// AsServiceNowLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsServiceNowLinkedService() (*ServiceNowLinkedService, bool) {
+ return nil, false
+}
+
+// AsQuickBooksLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsQuickBooksLinkedService() (*QuickBooksLinkedService, bool) {
+ return nil, false
+}
+
+// AsPrestoLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsPrestoLinkedService() (*PrestoLinkedService, bool) {
+ return nil, false
+}
+
+// AsPhoenixLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsPhoenixLinkedService() (*PhoenixLinkedService, bool) {
+ return nil, false
+}
+
+// AsPaypalLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsPaypalLinkedService() (*PaypalLinkedService, bool) {
+ return nil, false
+}
+
+// AsMarketoLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsMarketoLinkedService() (*MarketoLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return &amdls, true
+}
+
+// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
+ return nil, false
+}
+
+// AsMagentoLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsMagentoLinkedService() (*MagentoLinkedService, bool) {
+ return nil, false
+}
+
+// AsJiraLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsJiraLinkedService() (*JiraLinkedService, bool) {
+ return nil, false
+}
+
+// AsImpalaLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsImpalaLinkedService() (*ImpalaLinkedService, bool) {
+ return nil, false
+}
+
+// AsHubspotLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsHubspotLinkedService() (*HubspotLinkedService, bool) {
+ return nil, false
+}
+
+// AsHiveLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsHiveLinkedService() (*HiveLinkedService, bool) {
+ return nil, false
+}
+
+// AsHBaseLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsHBaseLinkedService() (*HBaseLinkedService, bool) {
+ return nil, false
+}
+
+// AsGreenplumLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsGreenplumLinkedService() (*GreenplumLinkedService, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQueryLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsGoogleBigQueryLinkedService() (*GoogleBigQueryLinkedService, bool) {
+ return nil, false
+}
+
+// AsEloquaLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsEloquaLinkedService() (*EloquaLinkedService, bool) {
+ return nil, false
+}
+
+// AsDrillLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsDrillLinkedService() (*DrillLinkedService, bool) {
+ return nil, false
+}
+
+// AsCouchbaseLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsCouchbaseLinkedService() (*CouchbaseLinkedService, bool) {
+ return nil, false
+}
+
+// AsConcurLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsConcurLinkedService() (*ConcurLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzurePostgreSQLLinkedService() (*AzurePostgreSQLLinkedService, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAmazonMWSLinkedService() (*AmazonMWSLinkedService, bool) {
+ return nil, false
+}
+
+// AsSapHanaLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSapHanaLinkedService() (*SapHanaLinkedService, bool) {
+ return nil, false
+}
+
+// AsSapBWLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSapBWLinkedService() (*SapBWLinkedService, bool) {
+ return nil, false
+}
+
+// AsSftpServerLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSftpServerLinkedService() (*SftpServerLinkedService, bool) {
+ return nil, false
+}
+
+// AsFtpServerLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsFtpServerLinkedService() (*FtpServerLinkedService, bool) {
+ return nil, false
+}
+
+// AsHTTPLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsHTTPLinkedService() (*HTTPLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureSearchLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureSearchLinkedService() (*AzureSearchLinkedService, bool) {
+ return nil, false
+}
+
+// AsCustomDataSourceLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsCustomDataSourceLinkedService() (*CustomDataSourceLinkedService, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAmazonRedshiftLinkedService() (*AmazonRedshiftLinkedService, bool) {
+ return nil, false
+}
+
+// AsAmazonS3LinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAmazonS3LinkedService() (*AmazonS3LinkedService, bool) {
+ return nil, false
+}
+
+// AsRestServiceLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsRestServiceLinkedService() (*RestServiceLinkedService, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSapOpenHubLinkedService() (*SapOpenHubLinkedService, bool) {
+ return nil, false
+}
+
+// AsSapEccLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSapEccLinkedService() (*SapEccLinkedService, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSapCloudForCustomerLinkedService() (*SapCloudForCustomerLinkedService, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSalesforceServiceCloudLinkedService() (*SalesforceServiceCloudLinkedService, bool) {
+ return nil, false
+}
+
+// AsSalesforceLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSalesforceLinkedService() (*SalesforceLinkedService, bool) {
+ return nil, false
+}
+
+// AsOffice365LinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsOffice365LinkedService() (*Office365LinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureBlobFSLinkedService() (*AzureBlobFSLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureDataLakeStoreLinkedService() (*AzureDataLakeStoreLinkedService, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPILinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsCosmosDbMongoDbAPILinkedService() (*CosmosDbMongoDbAPILinkedService, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2LinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsMongoDbV2LinkedService() (*MongoDbV2LinkedService, bool) {
+ return nil, false
+}
+
+// AsMongoDbLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsMongoDbLinkedService() (*MongoDbLinkedService, bool) {
+ return nil, false
+}
+
+// AsCassandraLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsCassandraLinkedService() (*CassandraLinkedService, bool) {
+ return nil, false
+}
+
+// AsWebLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsWebLinkedService() (*WebLinkedService, bool) {
+ return nil, false
+}
+
+// AsODataLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsODataLinkedService() (*ODataLinkedService, bool) {
+ return nil, false
+}
+
+// AsHdfsLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsHdfsLinkedService() (*HdfsLinkedService, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsMicrosoftAccessLinkedService() (*MicrosoftAccessLinkedService, bool) {
+ return nil, false
+}
+
+// AsInformixLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsInformixLinkedService() (*InformixLinkedService, bool) {
+ return nil, false
+}
+
+// AsOdbcLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsOdbcLinkedService() (*OdbcLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureMLLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureMLLinkedService() (*AzureMLLinkedService, bool) {
+ return nil, false
+}
+
+// AsTeradataLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsTeradataLinkedService() (*TeradataLinkedService, bool) {
+ return nil, false
+}
+
+// AsDb2LinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsDb2LinkedService() (*Db2LinkedService, bool) {
+ return nil, false
+}
+
+// AsSybaseLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSybaseLinkedService() (*SybaseLinkedService, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsPostgreSQLLinkedService() (*PostgreSQLLinkedService, bool) {
+ return nil, false
+}
+
+// AsMySQLLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsMySQLLinkedService() (*MySQLLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureMySQLLinkedService() (*AzureMySQLLinkedService, bool) {
+ return nil, false
+}
+
+// AsOracleLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsOracleLinkedService() (*OracleLinkedService, bool) {
+ return nil, false
+}
+
+// AsFileServerLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsFileServerLinkedService() (*FileServerLinkedService, bool) {
+ return nil, false
+}
+
+// AsHDInsightLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsHDInsightLinkedService() (*HDInsightLinkedService, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsCommonDataServiceForAppsLinkedService() (*CommonDataServiceForAppsLinkedService, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsDynamicsCrmLinkedService() (*DynamicsCrmLinkedService, bool) {
+ return nil, false
+}
+
+// AsDynamicsLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsDynamicsLinkedService() (*DynamicsLinkedService, bool) {
+ return nil, false
+}
+
+// AsCosmosDbLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsCosmosDbLinkedService() (*CosmosDbLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureKeyVaultLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureKeyVaultLinkedService() (*AzureKeyVaultLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureBatchLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureBatchLinkedService() (*AzureBatchLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureSQLMILinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureSQLMILinkedService() (*AzureSQLMILinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureSQLDatabaseLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureSQLDatabaseLinkedService() (*AzureSQLDatabaseLinkedService, bool) {
+ return nil, false
+}
+
+// AsSQLServerLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsSQLServerLinkedService() (*SQLServerLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureSQLDWLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureSQLDWLinkedService() (*AzureSQLDWLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureTableStorageLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureTableStorageLinkedService() (*AzureTableStorageLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureBlobStorageLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureBlobStorageLinkedService() (*AzureBlobStorageLinkedService, bool) {
+ return nil, false
+}
+
+// AsAzureStorageLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsAzureStorageLinkedService() (*AzureStorageLinkedService, bool) {
+ return nil, false
+}
+
+// AsLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsLinkedService() (*LinkedService, bool) {
+ return nil, false
+}
+
+// AsBasicLinkedService is the BasicLinkedService implementation for AzureMariaDBLinkedService.
+func (amdls AzureMariaDBLinkedService) AsBasicLinkedService() (BasicLinkedService, bool) {
+ return &amdls, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for AzureMariaDBLinkedService struct.
+func (amdls *AzureMariaDBLinkedService) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "typeProperties":
+ if v != nil {
+ var azureMariaDBLinkedServiceTypeProperties AzureMariaDBLinkedServiceTypeProperties
+ err = json.Unmarshal(*v, &azureMariaDBLinkedServiceTypeProperties)
+ if err != nil {
+ return err
+ }
+ amdls.AzureMariaDBLinkedServiceTypeProperties = &azureMariaDBLinkedServiceTypeProperties
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if amdls.AdditionalProperties == nil {
+ amdls.AdditionalProperties = make(map[string]interface{})
+ }
+ amdls.AdditionalProperties[k] = additionalProperties
+ }
+ case "connectVia":
+ if v != nil {
+ var connectVia IntegrationRuntimeReference
+ err = json.Unmarshal(*v, &connectVia)
+ if err != nil {
+ return err
+ }
+ amdls.ConnectVia = &connectVia
+ }
+ case "description":
+ if v != nil {
+ var description string
+ err = json.Unmarshal(*v, &description)
+ if err != nil {
+ return err
+ }
+ amdls.Description = &description
+ }
+ case "parameters":
+ if v != nil {
+ var parameters map[string]*ParameterSpecification
+ err = json.Unmarshal(*v, &parameters)
+ if err != nil {
+ return err
+ }
+ amdls.Parameters = parameters
+ }
+ case "annotations":
+ if v != nil {
+ var annotations []interface{}
+ err = json.Unmarshal(*v, &annotations)
+ if err != nil {
+ return err
+ }
+ amdls.Annotations = &annotations
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicLinkedService
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ amdls.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
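+// The snippet below is an illustrative sketch, not part of the generated
+// surface: it shows how the custom unmarshaler and the As* helpers defined
+// above are typically used together. The function name and payload shape are
+// assumptions based on the struct tags in this file.
+func exampleDecodeAzureMariaDBLinkedService(raw []byte) (*AzureMariaDBLinkedService, error) {
+	var amdls AzureMariaDBLinkedService
+	// UnmarshalJSON (above) routes "typeProperties" into
+	// AzureMariaDBLinkedServiceTypeProperties and collects unknown keys in
+	// AdditionalProperties.
+	if err := json.Unmarshal(raw, &amdls); err != nil {
+		return nil, err
+	}
+	// AsBasicLinkedService returns the value behind the BasicLinkedService
+	// interface; the non-matching AsXxxLinkedService helpers above all
+	// return (nil, false).
+	if ls, ok := amdls.AsBasicLinkedService(); ok {
+		_ = ls // usable anywhere a BasicLinkedService is expected
+	}
+	return &amdls, nil
+}
+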
+// AzureMariaDBLinkedServiceTypeProperties azure Database for MariaDB linked service properties.
+type AzureMariaDBLinkedServiceTypeProperties struct {
+ // ConnectionString - An ODBC connection string. Type: string, SecureString or AzureKeyVaultSecretReference.
+ ConnectionString interface{} `json:"connectionString,omitempty"`
+ // Pwd - The Azure key vault secret reference of password in connection string.
+ Pwd *AzureKeyVaultSecretReference `json:"pwd,omitempty"`
+ // EncryptedCredential - The encrypted credential used for authentication. Credentials are encrypted using the integration runtime credential manager. Type: string (or Expression with resultType string).
+ EncryptedCredential interface{} `json:"encryptedCredential,omitempty"`
+}
+
+// AzureMariaDBSource a copy activity Azure MariaDB source.
+type AzureMariaDBSource struct {
+ // Query - A query to retrieve data from source. Type: string (or Expression with resultType string).
+ Query interface{} `json:"query,omitempty"`
+ // AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // SourceRetryCount - Source retry count. Type: integer (or Expression with resultType integer).
+ SourceRetryCount interface{} `json:"sourceRetryCount,omitempty"`
+ // SourceRetryWait - Source retry wait. Type: string (or Expression with resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
+ // MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
+ MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
+ Type TypeBasicCopySource `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AzureMariaDBSource.
+func (amds AzureMariaDBSource) MarshalJSON() ([]byte, error) {
+ amds.Type = TypeAzureMariaDBSource
+ objectMap := make(map[string]interface{})
+ if amds.Query != nil {
+ objectMap["query"] = amds.Query
+ }
+ if amds.SourceRetryCount != nil {
+ objectMap["sourceRetryCount"] = amds.SourceRetryCount
+ }
+ if amds.SourceRetryWait != nil {
+ objectMap["sourceRetryWait"] = amds.SourceRetryWait
+ }
+ if amds.MaxConcurrentConnections != nil {
+ objectMap["maxConcurrentConnections"] = amds.MaxConcurrentConnections
+ }
+ if amds.Type != "" {
+ objectMap["type"] = amds.Type
+ }
+ for k, v := range amds.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
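+// Illustrative sketch, not part of the generated surface: the marshaler above
+// stamps Type with TypeAzureMariaDBSource even when the field is left unset,
+// and folds AdditionalProperties back into the top-level JSON object. The
+// function name and field values are placeholders.
+func exampleMarshalAzureMariaDBSource() ([]byte, error) {
+	src := AzureMariaDBSource{
+		Query:                "SELECT * FROM example_table",
+		AdditionalProperties: map[string]interface{}{"customKey": "customValue"},
+	}
+	// The resulting JSON carries "query", "customKey", and "type"
+	// (TypeAzureMariaDBSource).
+	return json.Marshal(src)
+}
+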
+// AsAmazonRedshiftSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsAmazonRedshiftSource() (*AmazonRedshiftSource, bool) {
+ return nil, false
+}
+
+// AsGoogleAdWordsSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsGoogleAdWordsSource() (*GoogleAdWordsSource, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsOracleServiceCloudSource() (*OracleServiceCloudSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsDynamicsAXSource() (*DynamicsAXSource, bool) {
+ return nil, false
+}
+
+// AsResponsysSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsResponsysSource() (*ResponsysSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSalesforceMarketingCloudSource() (*SalesforceMarketingCloudSource, bool) {
+ return nil, false
+}
+
+// AsVerticaSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsVerticaSource() (*VerticaSource, bool) {
+ return nil, false
+}
+
+// AsNetezzaSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsNetezzaSource() (*NetezzaSource, bool) {
+ return nil, false
+}
+
+// AsZohoSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsZohoSource() (*ZohoSource, bool) {
+ return nil, false
+}
+
+// AsXeroSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsXeroSource() (*XeroSource, bool) {
+ return nil, false
+}
+
+// AsSquareSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSquareSource() (*SquareSource, bool) {
+ return nil, false
+}
+
+// AsSparkSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSparkSource() (*SparkSource, bool) {
+ return nil, false
+}
+
+// AsShopifySource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsShopifySource() (*ShopifySource, bool) {
+ return nil, false
+}
+
+// AsServiceNowSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsServiceNowSource() (*ServiceNowSource, bool) {
+ return nil, false
+}
+
+// AsQuickBooksSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsQuickBooksSource() (*QuickBooksSource, bool) {
+ return nil, false
+}
+
+// AsPrestoSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsPrestoSource() (*PrestoSource, bool) {
+ return nil, false
+}
+
+// AsPhoenixSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsPhoenixSource() (*PhoenixSource, bool) {
+ return nil, false
+}
+
+// AsPaypalSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsPaypalSource() (*PaypalSource, bool) {
+ return nil, false
+}
+
+// AsMarketoSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsMarketoSource() (*MarketoSource, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return &amds, true
+}
+
+// AsMariaDBSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsMariaDBSource() (*MariaDBSource, bool) {
+ return nil, false
+}
+
+// AsMagentoSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsMagentoSource() (*MagentoSource, bool) {
+ return nil, false
+}
+
+// AsJiraSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsJiraSource() (*JiraSource, bool) {
+ return nil, false
+}
+
+// AsImpalaSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsImpalaSource() (*ImpalaSource, bool) {
+ return nil, false
+}
+
+// AsHubspotSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsHubspotSource() (*HubspotSource, bool) {
+ return nil, false
+}
+
+// AsHiveSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsHiveSource() (*HiveSource, bool) {
+ return nil, false
+}
+
+// AsHBaseSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsHBaseSource() (*HBaseSource, bool) {
+ return nil, false
+}
+
+// AsGreenplumSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsGreenplumSource() (*GreenplumSource, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQuerySource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsGoogleBigQuerySource() (*GoogleBigQuerySource, bool) {
+ return nil, false
+}
+
+// AsEloquaSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsEloquaSource() (*EloquaSource, bool) {
+ return nil, false
+}
+
+// AsDrillSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsDrillSource() (*DrillSource, bool) {
+ return nil, false
+}
+
+// AsCouchbaseSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsCouchbaseSource() (*CouchbaseSource, bool) {
+ return nil, false
+}
+
+// AsConcurSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsConcurSource() (*ConcurSource, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsAzurePostgreSQLSource() (*AzurePostgreSQLSource, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsAmazonMWSSource() (*AmazonMWSSource, bool) {
+ return nil, false
+}
+
+// AsHTTPSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsHTTPSource() (*HTTPSource, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsAzureBlobFSSource() (*AzureBlobFSSource, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsAzureDataLakeStoreSource() (*AzureDataLakeStoreSource, bool) {
+ return nil, false
+}
+
+// AsOffice365Source is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsOffice365Source() (*Office365Source, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPISource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsCosmosDbMongoDbAPISource() (*CosmosDbMongoDbAPISource, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2Source is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsMongoDbV2Source() (*MongoDbV2Source, bool) {
+ return nil, false
+}
+
+// AsMongoDbSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsMongoDbSource() (*MongoDbSource, bool) {
+ return nil, false
+}
+
+// AsCassandraSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsCassandraSource() (*CassandraSource, bool) {
+ return nil, false
+}
+
+// AsWebSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsWebSource() (*WebSource, bool) {
+ return nil, false
+}
+
+// AsTeradataSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsTeradataSource() (*TeradataSource, bool) {
+ return nil, false
+}
+
+// AsOracleSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsOracleSource() (*OracleSource, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsAzureDataExplorerSource() (*AzureDataExplorerSource, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsAzureMySQLSource() (*AzureMySQLSource, bool) {
+ return nil, false
+}
+
+// AsHdfsSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsHdfsSource() (*HdfsSource, bool) {
+ return nil, false
+}
+
+// AsFileSystemSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsFileSystemSource() (*FileSystemSource, bool) {
+ return nil, false
+}
+
+// AsSQLDWSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSQLDWSource() (*SQLDWSource, bool) {
+ return nil, false
+}
+
+// AsSQLMISource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSQLMISource() (*SQLMISource, bool) {
+ return nil, false
+}
+
+// AsAzureSQLSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsAzureSQLSource() (*AzureSQLSource, bool) {
+ return nil, false
+}
+
+// AsSQLServerSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSQLServerSource() (*SQLServerSource, bool) {
+ return nil, false
+}
+
+// AsSQLSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSQLSource() (*SQLSource, bool) {
+ return nil, false
+}
+
+// AsRestSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsRestSource() (*RestSource, bool) {
+ return nil, false
+}
+
+// AsSapTableSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSapTableSource() (*SapTableSource, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSapOpenHubSource() (*SapOpenHubSource, bool) {
+ return nil, false
+}
+
+// AsSapHanaSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSapHanaSource() (*SapHanaSource, bool) {
+ return nil, false
+}
+
+// AsSapEccSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSapEccSource() (*SapEccSource, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSapCloudForCustomerSource() (*SapCloudForCustomerSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSalesforceServiceCloudSource() (*SalesforceServiceCloudSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSalesforceSource() (*SalesforceSource, bool) {
+ return nil, false
+}
+
+// AsODataSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsODataSource() (*ODataSource, bool) {
+ return nil, false
+}
+
+// AsSapBwSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
+// AsSybaseSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsSybaseSource() (*SybaseSource, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsPostgreSQLSource() (*PostgreSQLSource, bool) {
+ return nil, false
+}
+
+// AsMySQLSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsMySQLSource() (*MySQLSource, bool) {
+ return nil, false
+}
+
+// AsOdbcSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsOdbcSource() (*OdbcSource, bool) {
+ return nil, false
+}
+
+// AsDb2Source is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsDb2Source() (*Db2Source, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsMicrosoftAccessSource() (*MicrosoftAccessSource, bool) {
+ return nil, false
+}
+
+// AsInformixSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsInformixSource() (*InformixSource, bool) {
+ return nil, false
+}
+
+// AsRelationalSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsRelationalSource() (*RelationalSource, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsCommonDataServiceForAppsSource() (*CommonDataServiceForAppsSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsDynamicsCrmSource() (*DynamicsCrmSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsDynamicsSource() (*DynamicsSource, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsDocumentDbCollectionSource() (*DocumentDbCollectionSource, bool) {
+ return nil, false
+}
+
+// AsBlobSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsBlobSource() (*BlobSource, bool) {
+ return nil, false
+}
+
+// AsAzureTableSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsAzureTableSource() (*AzureTableSource, bool) {
+ return nil, false
+}
+
+// AsBinarySource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsBinarySource() (*BinarySource, bool) {
+ return nil, false
+}
+
+// AsJSONSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
+// AsDelimitedTextSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
+ return nil, false
+}
+
+// AsParquetSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsParquetSource() (*ParquetSource, bool) {
+ return nil, false
+}
+
+// AsAvroSource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
+// AsCopySource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsCopySource() (*CopySource, bool) {
+ return nil, false
+}
+
+// AsBasicCopySource is the BasicCopySource implementation for AzureMariaDBSource.
+func (amds AzureMariaDBSource) AsBasicCopySource() (BasicCopySource, bool) {
+ return &amds, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for AzureMariaDBSource struct.
+func (amds *AzureMariaDBSource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "query":
+ if v != nil {
+ var query interface{}
+ err = json.Unmarshal(*v, &query)
+ if err != nil {
+ return err
+ }
+ amds.Query = query
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if amds.AdditionalProperties == nil {
+ amds.AdditionalProperties = make(map[string]interface{})
+ }
+ amds.AdditionalProperties[k] = additionalProperties
+ }
+ case "sourceRetryCount":
+ if v != nil {
+ var sourceRetryCount interface{}
+ err = json.Unmarshal(*v, &sourceRetryCount)
+ if err != nil {
+ return err
+ }
+ amds.SourceRetryCount = sourceRetryCount
+ }
+ case "sourceRetryWait":
+ if v != nil {
+ var sourceRetryWait interface{}
+ err = json.Unmarshal(*v, &sourceRetryWait)
+ if err != nil {
+ return err
+ }
+ amds.SourceRetryWait = sourceRetryWait
+ }
+ case "maxConcurrentConnections":
+ if v != nil {
+ var maxConcurrentConnections interface{}
+ err = json.Unmarshal(*v, &maxConcurrentConnections)
+ if err != nil {
+ return err
+ }
+ amds.MaxConcurrentConnections = maxConcurrentConnections
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicCopySource
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ amds.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// AzureMariaDBTableDataset azure Database for MariaDB dataset.
+type AzureMariaDBTableDataset struct {
+ // GenericDatasetTypeProperties - Properties specific to this dataset type.
+ *GenericDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Description - Dataset description.
+ Description *string `json:"description,omitempty"`
+ // Structure - Columns that define the structure of the dataset. Type: array (or Expression with resultType array), itemType: DatasetDataElement.
+ Structure interface{} `json:"structure,omitempty"`
+ // Schema - Columns that define the physical type schema of the dataset. Type: array (or Expression with resultType array), itemType: DatasetSchemaDataElement.
+ Schema interface{} `json:"schema,omitempty"`
+ // LinkedServiceName - Linked service reference.
+ LinkedServiceName *LinkedServiceReference `json:"linkedServiceName,omitempty"`
+ // Parameters - Parameters for dataset.
+ Parameters map[string]*ParameterSpecification `json:"parameters"`
+ // Annotations - List of tags that can be used for describing the Dataset.
+ Annotations *[]interface{} `json:"annotations,omitempty"`
+ // Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
+ Folder *DatasetFolder `json:"folder,omitempty"`
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
+ Type TypeBasicDataset `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) MarshalJSON() ([]byte, error) {
+ amdtd.Type = TypeAzureMariaDBTable
+ objectMap := make(map[string]interface{})
+ if amdtd.GenericDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = amdtd.GenericDatasetTypeProperties
+ }
+ if amdtd.Description != nil {
+ objectMap["description"] = amdtd.Description
+ }
+ if amdtd.Structure != nil {
+ objectMap["structure"] = amdtd.Structure
+ }
+ if amdtd.Schema != nil {
+ objectMap["schema"] = amdtd.Schema
+ }
+ if amdtd.LinkedServiceName != nil {
+ objectMap["linkedServiceName"] = amdtd.LinkedServiceName
+ }
+ if amdtd.Parameters != nil {
+ objectMap["parameters"] = amdtd.Parameters
+ }
+ if amdtd.Annotations != nil {
+ objectMap["annotations"] = amdtd.Annotations
+ }
+ if amdtd.Folder != nil {
+ objectMap["folder"] = amdtd.Folder
+ }
+ if amdtd.Type != "" {
+ objectMap["type"] = amdtd.Type
+ }
+ for k, v := range amdtd.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
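+// Illustrative sketch, not part of the generated surface: a minimal
+// AzureMariaDBTableDataset passed through the marshaler above. Only fields
+// declared on this struct are used; the description text is a placeholder.
+func exampleMarshalAzureMariaDBTableDataset() ([]byte, error) {
+	description := "Azure Database for MariaDB table dataset"
+	ds := AzureMariaDBTableDataset{
+		Description:          &description,
+		AdditionalProperties: map[string]interface{}{"customKey": "customValue"},
+	}
+	// MarshalJSON sets "type" to TypeAzureMariaDBTable and merges
+	// AdditionalProperties into the top-level object.
+	return json.Marshal(ds)
+}
+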
+// AsGoogleAdWordsObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsGoogleAdWordsObjectDataset() (*GoogleAdWordsObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzureDataExplorerTableDataset() (*AzureDataExplorerTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsOracleServiceCloudObjectDataset() (*OracleServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXResourceDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsDynamicsAXResourceDataset() (*DynamicsAXResourceDataset, bool) {
+ return nil, false
+}
+
+// AsResponsysObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsResponsysObjectDataset() (*ResponsysObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSalesforceMarketingCloudObjectDataset() (*SalesforceMarketingCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsVerticaTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsVerticaTableDataset() (*VerticaTableDataset, bool) {
+ return nil, false
+}
+
+// AsNetezzaTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsNetezzaTableDataset() (*NetezzaTableDataset, bool) {
+ return nil, false
+}
+
+// AsZohoObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsZohoObjectDataset() (*ZohoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsXeroObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsXeroObjectDataset() (*XeroObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSquareObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSquareObjectDataset() (*SquareObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSparkObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSparkObjectDataset() (*SparkObjectDataset, bool) {
+ return nil, false
+}
+
+// AsShopifyObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsShopifyObjectDataset() (*ShopifyObjectDataset, bool) {
+ return nil, false
+}
+
+// AsServiceNowObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsServiceNowObjectDataset() (*ServiceNowObjectDataset, bool) {
+ return nil, false
+}
+
+// AsQuickBooksObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsQuickBooksObjectDataset() (*QuickBooksObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPrestoObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsPrestoObjectDataset() (*PrestoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPhoenixObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsPhoenixObjectDataset() (*PhoenixObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPaypalObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsPaypalObjectDataset() (*PaypalObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMarketoObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return &amdtd, true
+}
+
+// AsMariaDBTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMagentoObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsMagentoObjectDataset() (*MagentoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsJiraObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsJiraObjectDataset() (*JiraObjectDataset, bool) {
+ return nil, false
+}
+
+// AsImpalaObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsImpalaObjectDataset() (*ImpalaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHubspotObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsHubspotObjectDataset() (*HubspotObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHiveObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsHiveObjectDataset() (*HiveObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHBaseObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsHBaseObjectDataset() (*HBaseObjectDataset, bool) {
+ return nil, false
+}
+
+// AsGreenplumTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsGreenplumTableDataset() (*GreenplumTableDataset, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQueryObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsGoogleBigQueryObjectDataset() (*GoogleBigQueryObjectDataset, bool) {
+ return nil, false
+}
+
+// AsEloquaObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsEloquaObjectDataset() (*EloquaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDrillTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsDrillTableDataset() (*DrillTableDataset, bool) {
+ return nil, false
+}
+
+// AsCouchbaseTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsCouchbaseTableDataset() (*CouchbaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsConcurObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsConcurObjectDataset() (*ConcurObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzurePostgreSQLTableDataset() (*AzurePostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAmazonMWSObjectDataset() (*AmazonMWSObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHTTPDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsHTTPDataset() (*HTTPDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSearchIndexDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzureSearchIndexDataset() (*AzureSearchIndexDataset, bool) {
+ return nil, false
+}
+
+// AsWebTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsWebTableDataset() (*WebTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapTableResourceDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSapTableResourceDataset() (*SapTableResourceDataset, bool) {
+ return nil, false
+}
+
+// AsRestResourceDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsRestResourceDataset() (*RestResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSQLServerTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSQLServerTableDataset() (*SQLServerTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSapOpenHubTableDataset() (*SapOpenHubTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapHanaTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSapHanaTableDataset() (*SapHanaTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapEccResourceDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSapEccResourceDataset() (*SapEccResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerResourceDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCustomerResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapBwCubeDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceObjectDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsSalesforceObjectDataset() (*SalesforceObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsMicrosoftAccessTableDataset() (*MicrosoftAccessTableDataset, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsPostgreSQLTableDataset() (*PostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsMySQLTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsMySQLTableDataset() (*MySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsOdbcTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsOdbcTableDataset() (*OdbcTableDataset, bool) {
+ return nil, false
+}
+
+// AsInformixTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsInformixTableDataset() (*InformixTableDataset, bool) {
+ return nil, false
+}
+
+// AsRelationalTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsRelationalTableDataset() (*RelationalTableDataset, bool) {
+ return nil, false
+}
+
+// AsDb2TableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsTeradataTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsTeradataTableDataset() (*TeradataTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsOracleTableDataset() (*OracleTableDataset, bool) {
+ return nil, false
+}
+
+// AsODataResourceDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsODataResourceDataset() (*ODataResourceDataset, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPICollectionDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsCosmosDbMongoDbAPICollectionDataset() (*CosmosDbMongoDbAPICollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2CollectionDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsMongoDbV2CollectionDataset() (*MongoDbV2CollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbCollectionDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsMongoDbCollectionDataset() (*MongoDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsFileShareDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsFileShareDataset() (*FileShareDataset, bool) {
+ return nil, false
+}
+
+// AsOffice365Dataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsOffice365Dataset() (*Office365Dataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzureBlobFSDataset() (*AzureBlobFSDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzureDataLakeStoreDataset() (*AzureDataLakeStoreDataset, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsEntityDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsCommonDataServiceForAppsEntityDataset() (*CommonDataServiceForAppsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmEntityDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsDynamicsCrmEntityDataset() (*DynamicsCrmEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsEntityDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsDynamicsEntityDataset() (*DynamicsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsDocumentDbCollectionDataset() (*DocumentDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsCustomDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsCustomDataset() (*CustomDataset, bool) {
+ return nil, false
+}
+
+// AsCassandraTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsCassandraTableDataset() (*CassandraTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLDWTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzureSQLDWTableDataset() (*AzureSQLDWTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLMITableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzureSQLMITableDataset() (*AzureSQLMITableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzureSQLTableDataset() (*AzureSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureTableDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzureTableDataset() (*AzureTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAzureBlobDataset() (*AzureBlobDataset, bool) {
+ return nil, false
+}
+
+// AsBinaryDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
+ return nil, false
+}
+
+// AsJSONDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
+// AsDelimitedTextDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
+ return nil, false
+}
+
+// AsParquetDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
+ return nil, false
+}
+
+// AsAvroDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonS3Dataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
+ return nil, false
+}
+
+// AsDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsDataset() (*Dataset, bool) {
+ return nil, false
+}
+
+// AsBasicDataset is the BasicDataset implementation for AzureMariaDBTableDataset.
+func (amdtd AzureMariaDBTableDataset) AsBasicDataset() (BasicDataset, bool) {
+ return &amdtd, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for AzureMariaDBTableDataset struct.
+func (amdtd *AzureMariaDBTableDataset) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "typeProperties":
+ if v != nil {
+ var genericDatasetTypeProperties GenericDatasetTypeProperties
+ err = json.Unmarshal(*v, &genericDatasetTypeProperties)
+ if err != nil {
+ return err
+ }
+ amdtd.GenericDatasetTypeProperties = &genericDatasetTypeProperties
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if amdtd.AdditionalProperties == nil {
+ amdtd.AdditionalProperties = make(map[string]interface{})
+ }
+ amdtd.AdditionalProperties[k] = additionalProperties
+ }
+ case "description":
+ if v != nil {
+ var description string
+ err = json.Unmarshal(*v, &description)
+ if err != nil {
+ return err
+ }
+ amdtd.Description = &description
+ }
+ case "structure":
+ if v != nil {
+ var structure interface{}
+ err = json.Unmarshal(*v, &structure)
+ if err != nil {
+ return err
+ }
+ amdtd.Structure = structure
+ }
+ case "schema":
+ if v != nil {
+ var schema interface{}
+ err = json.Unmarshal(*v, &schema)
+ if err != nil {
+ return err
+ }
+ amdtd.Schema = schema
+ }
+ case "linkedServiceName":
+ if v != nil {
+ var linkedServiceName LinkedServiceReference
+ err = json.Unmarshal(*v, &linkedServiceName)
+ if err != nil {
+ return err
+ }
+ amdtd.LinkedServiceName = &linkedServiceName
+ }
+ case "parameters":
+ if v != nil {
+ var parameters map[string]*ParameterSpecification
+ err = json.Unmarshal(*v, &parameters)
+ if err != nil {
+ return err
+ }
+ amdtd.Parameters = parameters
+ }
+ case "annotations":
+ if v != nil {
+ var annotations []interface{}
+ err = json.Unmarshal(*v, &annotations)
+ if err != nil {
+ return err
+ }
+ amdtd.Annotations = &annotations
+ }
+ case "folder":
+ if v != nil {
+ var folder DatasetFolder
+ err = json.Unmarshal(*v, &folder)
+ if err != nil {
+ return err
+ }
+ amdtd.Folder = &folder
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicDataset
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ amdtd.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
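+// Illustrative sketch, not part of the generated surface: the unmarshaler
+// above sends any key it does not recognize (the default case) into
+// AdditionalProperties, so service-side extensions survive a decode. The
+// payload below is a minimal assumed example.
+func exampleDecodeAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, error) {
+	raw := []byte(`{"type":"AzureMariaDBTable","description":"example","customKey":"customValue"}`)
+	var ds AzureMariaDBTableDataset
+	if err := json.Unmarshal(raw, &ds); err != nil {
+		return nil, err
+	}
+	// ds.Description is set from "description"; "customKey" ends up in
+	// ds.AdditionalProperties["customKey"].
+	return &ds, nil
+}
+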
// AzureMLBatchExecutionActivity azure ML Batch Execution activity.
type AzureMLBatchExecutionActivity struct {
// AzureMLBatchExecutionActivityTypeProperties - Azure ML Batch Execution activity properties.
@@ -21425,7 +25994,7 @@ type AzureMLLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -21577,6 +26146,11 @@ func (amls AzureMLLinkedService) AsMarketoLinkedService() (*MarketoLinkedService
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureMLLinkedService.
+func (amls AzureMLLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureMLLinkedService.
func (amls AzureMLLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -22464,7 +27038,7 @@ type AzureMySQLLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -22616,6 +27190,11 @@ func (amsls AzureMySQLLinkedService) AsMarketoLinkedService() (*MarketoLinkedSer
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureMySQLLinkedService.
+func (amsls AzureMySQLLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureMySQLLinkedService.
func (amsls AzureMySQLLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -23047,6 +27626,317 @@ type AzureMySQLLinkedServiceTypeProperties struct {
EncryptedCredential interface{} `json:"encryptedCredential,omitempty"`
}
+// AzureMySQLSink a copy activity Azure MySQL sink.
+type AzureMySQLSink struct {
+ // PreCopyScript - A query to execute before starting the copy. Type: string (or Expression with resultType string).
+ PreCopyScript interface{} `json:"preCopyScript,omitempty"`
+	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // WriteBatchSize - Write batch size. Type: integer (or Expression with resultType integer), minimum: 0.
+ WriteBatchSize interface{} `json:"writeBatchSize,omitempty"`
+ // WriteBatchTimeout - Write batch timeout. Type: string (or Expression with resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ WriteBatchTimeout interface{} `json:"writeBatchTimeout,omitempty"`
+ // SinkRetryCount - Sink retry count. Type: integer (or Expression with resultType integer).
+ SinkRetryCount interface{} `json:"sinkRetryCount,omitempty"`
+ // SinkRetryWait - Sink retry wait. Type: string (or Expression with resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
+ // MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
+ MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
+ Type TypeBasicCopySink `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AzureMySQLSink.
+func (amss AzureMySQLSink) MarshalJSON() ([]byte, error) {
+ amss.Type = TypeAzureMySQLSink
+ objectMap := make(map[string]interface{})
+ if amss.PreCopyScript != nil {
+ objectMap["preCopyScript"] = amss.PreCopyScript
+ }
+ if amss.WriteBatchSize != nil {
+ objectMap["writeBatchSize"] = amss.WriteBatchSize
+ }
+ if amss.WriteBatchTimeout != nil {
+ objectMap["writeBatchTimeout"] = amss.WriteBatchTimeout
+ }
+ if amss.SinkRetryCount != nil {
+ objectMap["sinkRetryCount"] = amss.SinkRetryCount
+ }
+ if amss.SinkRetryWait != nil {
+ objectMap["sinkRetryWait"] = amss.SinkRetryWait
+ }
+ if amss.MaxConcurrentConnections != nil {
+ objectMap["maxConcurrentConnections"] = amss.MaxConcurrentConnections
+ }
+ if amss.Type != "" {
+ objectMap["type"] = amss.Type
+ }
+ for k, v := range amss.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsCosmosDbMongoDbAPISink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsCosmosDbMongoDbAPISink() (*CosmosDbMongoDbAPISink, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsSalesforceServiceCloudSink() (*SalesforceServiceCloudSink, bool) {
+ return nil, false
+}
+
+// AsSalesforceSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsSalesforceSink() (*SalesforceSink, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsAzureDataExplorerSink() (*AzureDataExplorerSink, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsCommonDataServiceForAppsSink() (*CommonDataServiceForAppsSink, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsDynamicsCrmSink() (*DynamicsCrmSink, bool) {
+ return nil, false
+}
+
+// AsDynamicsSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsDynamicsSink() (*DynamicsSink, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsMicrosoftAccessSink() (*MicrosoftAccessSink, bool) {
+ return nil, false
+}
+
+// AsInformixSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsInformixSink() (*InformixSink, bool) {
+ return nil, false
+}
+
+// AsOdbcSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsOdbcSink() (*OdbcSink, bool) {
+ return nil, false
+}
+
+// AsAzureSearchIndexSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsAzureSearchIndexSink() (*AzureSearchIndexSink, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsAzureBlobFSSink() (*AzureBlobFSSink, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsAzureDataLakeStoreSink() (*AzureDataLakeStoreSink, bool) {
+ return nil, false
+}
+
+// AsOracleSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsOracleSink() (*OracleSink, bool) {
+ return nil, false
+}
+
+// AsSQLDWSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsSQLDWSink() (*SQLDWSink, bool) {
+ return nil, false
+}
+
+// AsSQLMISink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsSQLMISink() (*SQLMISink, bool) {
+ return nil, false
+}
+
+// AsAzureSQLSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsAzureSQLSink() (*AzureSQLSink, bool) {
+ return nil, false
+}
+
+// AsSQLServerSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsSQLServerSink() (*SQLServerSink, bool) {
+ return nil, false
+}
+
+// AsSQLSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsSQLSink() (*SQLSink, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsDocumentDbCollectionSink() (*DocumentDbCollectionSink, bool) {
+ return nil, false
+}
+
+// AsFileSystemSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsFileSystemSink() (*FileSystemSink, bool) {
+ return nil, false
+}
+
+// AsBlobSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsBlobSink() (*BlobSink, bool) {
+ return nil, false
+}
+
+// AsBinarySink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsBinarySink() (*BinarySink, bool) {
+ return nil, false
+}
+
+// AsParquetSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsParquetSink() (*ParquetSink, bool) {
+ return nil, false
+}
+
+// AsAvroSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
+// AsAzureTableSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsAzureTableSink() (*AzureTableSink, bool) {
+ return nil, false
+}
+
+// AsAzureQueueSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsAzureQueueSink() (*AzureQueueSink, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return &amss, true
+}
+
+// AsAzurePostgreSQLSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
+ return nil, false
+}
+
+// AsJSONSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
+// AsDelimitedTextSink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
+ return nil, false
+}
+
+// AsCopySink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsCopySink() (*CopySink, bool) {
+ return nil, false
+}
+
+// AsBasicCopySink is the BasicCopySink implementation for AzureMySQLSink.
+func (amss AzureMySQLSink) AsBasicCopySink() (BasicCopySink, bool) {
+ return &amss, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for AzureMySQLSink struct.
+func (amss *AzureMySQLSink) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "preCopyScript":
+ if v != nil {
+ var preCopyScript interface{}
+ err = json.Unmarshal(*v, &preCopyScript)
+ if err != nil {
+ return err
+ }
+ amss.PreCopyScript = preCopyScript
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if amss.AdditionalProperties == nil {
+ amss.AdditionalProperties = make(map[string]interface{})
+ }
+ amss.AdditionalProperties[k] = additionalProperties
+ }
+ case "writeBatchSize":
+ if v != nil {
+ var writeBatchSize interface{}
+ err = json.Unmarshal(*v, &writeBatchSize)
+ if err != nil {
+ return err
+ }
+ amss.WriteBatchSize = writeBatchSize
+ }
+ case "writeBatchTimeout":
+ if v != nil {
+ var writeBatchTimeout interface{}
+ err = json.Unmarshal(*v, &writeBatchTimeout)
+ if err != nil {
+ return err
+ }
+ amss.WriteBatchTimeout = writeBatchTimeout
+ }
+ case "sinkRetryCount":
+ if v != nil {
+ var sinkRetryCount interface{}
+ err = json.Unmarshal(*v, &sinkRetryCount)
+ if err != nil {
+ return err
+ }
+ amss.SinkRetryCount = sinkRetryCount
+ }
+ case "sinkRetryWait":
+ if v != nil {
+ var sinkRetryWait interface{}
+ err = json.Unmarshal(*v, &sinkRetryWait)
+ if err != nil {
+ return err
+ }
+ amss.SinkRetryWait = sinkRetryWait
+ }
+ case "maxConcurrentConnections":
+ if v != nil {
+ var maxConcurrentConnections interface{}
+ err = json.Unmarshal(*v, &maxConcurrentConnections)
+ if err != nil {
+ return err
+ }
+ amss.MaxConcurrentConnections = maxConcurrentConnections
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicCopySink
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ amss.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
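Editor's note (not part of the generated diff): the new AzureMySQLSink joins the BasicCopySink discriminated union via the custom MarshalJSON/UnmarshalJSON and the As* helpers added above. The sketch below is illustrative only; it uses only identifiers visible in this diff, assumes the package lives at the SDK's usual services/datafactory/mgmt/2018-06-01/datafactory import path, and the literal values ("TRUNCATE TABLE staging", "customProperty") are made up for the example.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// MarshalJSON stamps the discriminator (TypeAzureMySQLSink) and folds
	// AdditionalProperties back into the top-level JSON object.
	sink := datafactory.AzureMySQLSink{
		PreCopyScript:  "TRUNCATE TABLE staging",
		WriteBatchSize: 1000,
		AdditionalProperties: map[string]interface{}{
			"customProperty": "kept verbatim",
		},
	}
	payload, err := json.Marshal(sink)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))

	// UnmarshalJSON routes known keys to their fields and everything else
	// into AdditionalProperties, mirroring the switch in the generated code.
	var decoded datafactory.AzureMySQLSink
	if err := json.Unmarshal(payload, &decoded); err != nil {
		panic(err)
	}

	// The As* helpers implement the BasicCopySink type switch: only
	// AsAzureMySQLSink (and AsBasicCopySink) report ok for this value.
	if concrete, ok := decoded.AsAzureMySQLSink(); ok {
		fmt.Println("pre-copy script:", concrete.PreCopyScript)
	}
}
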
// AzureMySQLSource a copy activity Azure MySQL source.
type AzureMySQLSource struct {
// Query - Database query. Type: string (or Expression with resultType string).
@@ -23059,7 +27949,7 @@ type AzureMySQLSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -23183,6 +28073,11 @@ func (amss AzureMySQLSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for AzureMySQLSource.
+func (amss AzureMySQLSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for AzureMySQLSource.
func (amss AzureMySQLSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -23403,6 +28298,11 @@ func (amss AzureMySQLSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for AzureMySQLSource.
+func (amss AzureMySQLSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for AzureMySQLSource.
func (amss AzureMySQLSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -23478,6 +28378,11 @@ func (amss AzureMySQLSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for AzureMySQLSource.
+func (amss AzureMySQLSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for AzureMySQLSource.
func (amss AzureMySQLSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -23488,6 +28393,11 @@ func (amss AzureMySQLSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for AzureMySQLSource.
+func (amss AzureMySQLSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for AzureMySQLSource.
func (amss AzureMySQLSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -23590,7 +28500,7 @@ type AzureMySQLTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -23726,6 +28636,11 @@ func (amstd AzureMySQLTableDataset) AsMarketoObjectDataset() (*MarketoObjectData
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzureMySQLTableDataset.
+func (amstd AzureMySQLTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AzureMySQLTableDataset.
func (amstd AzureMySQLTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -23851,6 +28766,16 @@ func (amstd AzureMySQLTableDataset) AsSapCloudForCustomerResourceDataset() (*Sap
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AzureMySQLTableDataset.
+func (amstd AzureMySQLTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzureMySQLTableDataset.
+func (amstd AzureMySQLTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzureMySQLTableDataset.
func (amstd AzureMySQLTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -23891,6 +28816,16 @@ func (amstd AzureMySQLTableDataset) AsRelationalTableDataset() (*RelationalTable
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AzureMySQLTableDataset.
+func (amstd AzureMySQLTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzureMySQLTableDataset.
+func (amstd AzureMySQLTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AzureMySQLTableDataset.
func (amstd AzureMySQLTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return &amstd, true
@@ -24006,6 +28941,11 @@ func (amstd AzureMySQLTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AzureMySQLTableDataset.
+func (amstd AzureMySQLTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AzureMySQLTableDataset.
func (amstd AzureMySQLTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -24016,6 +28956,11 @@ func (amstd AzureMySQLTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AzureMySQLTableDataset.
+func (amstd AzureMySQLTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AzureMySQLTableDataset.
func (amstd AzureMySQLTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -24159,7 +29104,7 @@ type AzurePostgreSQLLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -24311,6 +29256,11 @@ func (apsls AzurePostgreSQLLinkedService) AsMarketoLinkedService() (*MarketoLink
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzurePostgreSQLLinkedService.
+func (apsls AzurePostgreSQLLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzurePostgreSQLLinkedService.
func (apsls AzurePostgreSQLLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -24758,7 +29708,7 @@ type AzurePostgreSQLSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -24913,6 +29863,11 @@ func (apss AzurePostgreSQLSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for AzurePostgreSQLSink.
+func (apss AzurePostgreSQLSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for AzurePostgreSQLSink.
func (apss AzurePostgreSQLSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -24928,11 +29883,21 @@ func (apss AzurePostgreSQLSink) AsSapCloudForCustomerSink() (*SapCloudForCustome
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for AzurePostgreSQLSink.
+func (apss AzurePostgreSQLSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for AzurePostgreSQLSink.
func (apss AzurePostgreSQLSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return &apss, true
}
+// AsJSONSink is the BasicCopySink implementation for AzurePostgreSQLSink.
+func (apss AzurePostgreSQLSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for AzurePostgreSQLSink.
func (apss AzurePostgreSQLSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -25050,7 +30015,7 @@ type AzurePostgreSQLSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -25174,6 +30139,11 @@ func (apss AzurePostgreSQLSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for AzurePostgreSQLSource.
+func (apss AzurePostgreSQLSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for AzurePostgreSQLSource.
func (apss AzurePostgreSQLSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -25394,6 +30364,11 @@ func (apss AzurePostgreSQLSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for AzurePostgreSQLSource.
+func (apss AzurePostgreSQLSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for AzurePostgreSQLSource.
func (apss AzurePostgreSQLSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -25469,6 +30444,11 @@ func (apss AzurePostgreSQLSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for AzurePostgreSQLSource.
+func (apss AzurePostgreSQLSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for AzurePostgreSQLSource.
func (apss AzurePostgreSQLSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -25479,6 +30459,11 @@ func (apss AzurePostgreSQLSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for AzurePostgreSQLSource.
+func (apss AzurePostgreSQLSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for AzurePostgreSQLSource.
func (apss AzurePostgreSQLSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -25581,7 +30566,7 @@ type AzurePostgreSQLTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -25717,6 +30702,11 @@ func (apstd AzurePostgreSQLTableDataset) AsMarketoObjectDataset() (*MarketoObjec
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
+func (apstd AzurePostgreSQLTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
func (apstd AzurePostgreSQLTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -25842,6 +30832,16 @@ func (apstd AzurePostgreSQLTableDataset) AsSapCloudForCustomerResourceDataset()
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
+func (apstd AzurePostgreSQLTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
+func (apstd AzurePostgreSQLTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
func (apstd AzurePostgreSQLTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -25882,6 +30882,16 @@ func (apstd AzurePostgreSQLTableDataset) AsRelationalTableDataset() (*Relational
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
+func (apstd AzurePostgreSQLTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
+func (apstd AzurePostgreSQLTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
func (apstd AzurePostgreSQLTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -25997,6 +31007,11 @@ func (apstd AzurePostgreSQLTableDataset) AsBinaryDataset() (*BinaryDataset, bool
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
+func (apstd AzurePostgreSQLTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
func (apstd AzurePostgreSQLTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -26007,6 +31022,11 @@ func (apstd AzurePostgreSQLTableDataset) AsParquetDataset() (*ParquetDataset, bo
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
+func (apstd AzurePostgreSQLTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AzurePostgreSQLTableDataset.
func (apstd AzurePostgreSQLTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -26154,7 +31174,7 @@ type AzureQueueSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -26306,6 +31326,11 @@ func (aqs AzureQueueSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for AzureQueueSink.
+func (aqs AzureQueueSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for AzureQueueSink.
func (aqs AzureQueueSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -26321,11 +31346,21 @@ func (aqs AzureQueueSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink,
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for AzureQueueSink.
+func (aqs AzureQueueSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for AzureQueueSink.
func (aqs AzureQueueSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for AzureQueueSink.
+func (aqs AzureQueueSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for AzureQueueSink.
func (aqs AzureQueueSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -26442,7 +31477,7 @@ type AzureSearchIndexDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -26578,6 +31613,11 @@ func (asid AzureSearchIndexDataset) AsMarketoObjectDataset() (*MarketoObjectData
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzureSearchIndexDataset.
+func (asid AzureSearchIndexDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AzureSearchIndexDataset.
func (asid AzureSearchIndexDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -26703,6 +31743,16 @@ func (asid AzureSearchIndexDataset) AsSapCloudForCustomerResourceDataset() (*Sap
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AzureSearchIndexDataset.
+func (asid AzureSearchIndexDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzureSearchIndexDataset.
+func (asid AzureSearchIndexDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzureSearchIndexDataset.
func (asid AzureSearchIndexDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -26743,6 +31793,16 @@ func (asid AzureSearchIndexDataset) AsRelationalTableDataset() (*RelationalTable
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AzureSearchIndexDataset.
+func (asid AzureSearchIndexDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzureSearchIndexDataset.
+func (asid AzureSearchIndexDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AzureSearchIndexDataset.
func (asid AzureSearchIndexDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -26858,6 +31918,11 @@ func (asid AzureSearchIndexDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AzureSearchIndexDataset.
+func (asid AzureSearchIndexDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AzureSearchIndexDataset.
func (asid AzureSearchIndexDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -26868,6 +31933,11 @@ func (asid AzureSearchIndexDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AzureSearchIndexDataset.
+func (asid AzureSearchIndexDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AzureSearchIndexDataset.
func (asid AzureSearchIndexDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
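
The stubs above follow the SDK's discriminated-union pattern: every concrete dataset type implements one `AsXxx` accessor per member of the `BasicDataset` interface, and only the accessor matching the receiver's dynamic type returns a non-nil value with `true`. A minimal, hedged sketch of how a caller might consume the newly added accessors; the import path is an assumption based on the usual azure-sdk-for-go layout for datafactory 2018-06-01, and the dataset value is illustrative only.

```go
// A hedged usage sketch, not part of the generated code. The import path below
// is assumed from the usual azure-sdk-for-go services/<name>/mgmt/<version> layout.
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// describe recovers a concrete dataset from the polymorphic BasicDataset
// interface using the generated As... accessors; only the accessor matching
// the dynamic type returns (non-nil, true).
func describe(d datafactory.BasicDataset) {
	if m, ok := d.AsAzureMariaDBTableDataset(); ok {
		fmt.Println("Azure MariaDB table dataset:", m)
		return
	}
	if a, ok := d.AsAvroDataset(); ok {
		fmt.Println("Avro dataset:", a)
		return
	}
	fmt.Println("some other dataset type")
}

func main() {
	// AzureSearchIndexDataset implements BasicDataset with value receivers, so a
	// zero value is enough for illustration; neither accessor above matches it.
	describe(datafactory.AzureSearchIndexDataset{})
}
```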
@@ -27013,7 +32083,7 @@ type AzureSearchIndexSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -27168,6 +32238,11 @@ func (asis AzureSearchIndexSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for AzureSearchIndexSink.
+func (asis AzureSearchIndexSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for AzureSearchIndexSink.
func (asis AzureSearchIndexSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -27183,11 +32258,21 @@ func (asis AzureSearchIndexSink) AsSapCloudForCustomerSink() (*SapCloudForCustom
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for AzureSearchIndexSink.
+func (asis AzureSearchIndexSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for AzureSearchIndexSink.
func (asis AzureSearchIndexSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for AzureSearchIndexSink.
+func (asis AzureSearchIndexSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for AzureSearchIndexSink.
func (asis AzureSearchIndexSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -27307,7 +32392,7 @@ type AzureSearchLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -27459,6 +32544,11 @@ func (asls AzureSearchLinkedService) AsMarketoLinkedService() (*MarketoLinkedSer
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureSearchLinkedService.
+func (asls AzureSearchLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureSearchLinkedService.
func (asls AzureSearchLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -27945,7 +33035,7 @@ type AzureSQLDatabaseLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -28097,6 +33187,11 @@ func (asdls AzureSQLDatabaseLinkedService) AsMarketoLinkedService() (*MarketoLin
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureSQLDatabaseLinkedService.
+func (asdls AzureSQLDatabaseLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureSQLDatabaseLinkedService.
func (asdls AzureSQLDatabaseLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -28616,7 +33711,7 @@ type AzureSQLDWLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -28768,6 +33863,11 @@ func (asdls AzureSQLDWLinkedService) AsMarketoLinkedService() (*MarketoLinkedSer
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureSQLDWLinkedService.
+func (asdls AzureSQLDWLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureSQLDWLinkedService.
func (asdls AzureSQLDWLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -29293,7 +34393,7 @@ type AzureSQLDWTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -29429,6 +34529,11 @@ func (asdtd AzureSQLDWTableDataset) AsMarketoObjectDataset() (*MarketoObjectData
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzureSQLDWTableDataset.
+func (asdtd AzureSQLDWTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AzureSQLDWTableDataset.
func (asdtd AzureSQLDWTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -29554,6 +34659,16 @@ func (asdtd AzureSQLDWTableDataset) AsSapCloudForCustomerResourceDataset() (*Sap
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AzureSQLDWTableDataset.
+func (asdtd AzureSQLDWTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzureSQLDWTableDataset.
+func (asdtd AzureSQLDWTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzureSQLDWTableDataset.
func (asdtd AzureSQLDWTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -29594,6 +34709,16 @@ func (asdtd AzureSQLDWTableDataset) AsRelationalTableDataset() (*RelationalTable
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AzureSQLDWTableDataset.
+func (asdtd AzureSQLDWTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzureSQLDWTableDataset.
+func (asdtd AzureSQLDWTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AzureSQLDWTableDataset.
func (asdtd AzureSQLDWTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -29709,6 +34834,11 @@ func (asdtd AzureSQLDWTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AzureSQLDWTableDataset.
+func (asdtd AzureSQLDWTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AzureSQLDWTableDataset.
func (asdtd AzureSQLDWTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -29719,6 +34849,11 @@ func (asdtd AzureSQLDWTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AzureSQLDWTableDataset.
+func (asdtd AzureSQLDWTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AzureSQLDWTableDataset.
func (asdtd AzureSQLDWTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -29844,8 +34979,12 @@ func (asdtd *AzureSQLDWTableDataset) UnmarshalJSON(body []byte) error {
// AzureSQLDWTableDatasetTypeProperties azure SQL Data Warehouse dataset properties.
type AzureSQLDWTableDatasetTypeProperties struct {
- // TableName - The table name of the Azure SQL Data Warehouse. Type: string (or Expression with resultType string).
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
TableName interface{} `json:"tableName,omitempty"`
+ // Schema - The schema name of the Azure SQL Data Warehouse. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+ // Table - The table name of the Azure SQL Data Warehouse. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
}
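
The same tableName-to-schema/table split recurs for the other relational dataset type-properties structs later in this diff (for example `AzureSQLTableDatasetTypeProperties`). Below is a minimal, self-contained sketch of the resulting wire shape; the local struct simply mirrors the generated field names and JSON tags shown above and is not the SDK type itself.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of AzureSQLDWTableDatasetTypeProperties, copied from the diff
// above for illustration; the real type lives in the generated datafactory package.
type azureSQLDWTableDatasetTypeProperties struct {
	TableName interface{} `json:"tableName,omitempty"` // retired; kept for backward compatibility
	Schema    interface{} `json:"schema,omitempty"`
	Table     interface{} `json:"table,omitempty"`
}

func main() {
	// New style: populate Schema and Table instead of the retired TableName.
	props := azureSQLDWTableDatasetTypeProperties{
		Schema: "dbo",
		Table:  "FactSales",
	}
	b, err := json.Marshal(props)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(b)) // {"schema":"dbo","table":"FactSales"}
}
```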
// AzureSQLMILinkedService azure SQL Managed Instance linked service.
@@ -29862,7 +35001,7 @@ type AzureSQLMILinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -30014,6 +35153,11 @@ func (asmls AzureSQLMILinkedService) AsMarketoLinkedService() (*MarketoLinkedSer
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureSQLMILinkedService.
+func (asmls AzureSQLMILinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureSQLMILinkedService.
func (asmls AzureSQLMILinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -30539,7 +35683,7 @@ type AzureSQLMITableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -30675,6 +35819,11 @@ func (asmtd AzureSQLMITableDataset) AsMarketoObjectDataset() (*MarketoObjectData
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzureSQLMITableDataset.
+func (asmtd AzureSQLMITableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AzureSQLMITableDataset.
func (asmtd AzureSQLMITableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -30800,6 +35949,16 @@ func (asmtd AzureSQLMITableDataset) AsSapCloudForCustomerResourceDataset() (*Sap
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AzureSQLMITableDataset.
+func (asmtd AzureSQLMITableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzureSQLMITableDataset.
+func (asmtd AzureSQLMITableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzureSQLMITableDataset.
func (asmtd AzureSQLMITableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -30840,6 +35999,16 @@ func (asmtd AzureSQLMITableDataset) AsRelationalTableDataset() (*RelationalTable
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AzureSQLMITableDataset.
+func (asmtd AzureSQLMITableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzureSQLMITableDataset.
+func (asmtd AzureSQLMITableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AzureSQLMITableDataset.
func (asmtd AzureSQLMITableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -30955,6 +36124,11 @@ func (asmtd AzureSQLMITableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AzureSQLMITableDataset.
+func (asmtd AzureSQLMITableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AzureSQLMITableDataset.
func (asmtd AzureSQLMITableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -30965,6 +36139,11 @@ func (asmtd AzureSQLMITableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AzureSQLMITableDataset.
+func (asmtd AzureSQLMITableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AzureSQLMITableDataset.
func (asmtd AzureSQLMITableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -31110,6 +36289,8 @@ type AzureSQLSink struct {
StoredProcedureParameters map[string]*StoredProcedureParameter `json:"storedProcedureParameters"`
// StoredProcedureTableTypeParameterName - The stored procedure parameter name of the table type. Type: string (or Expression with resultType string).
StoredProcedureTableTypeParameterName interface{} `json:"storedProcedureTableTypeParameterName,omitempty"`
+ // TableOption - The option to handle sink table, such as autoCreate. For now only 'autoCreate' value is supported. Type: string (or Expression with resultType string).
+ TableOption interface{} `json:"tableOption,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized this collection
AdditionalProperties map[string]interface{} `json:""`
// WriteBatchSize - Write batch size. Type: integer (or Expression with resultType integer), minimum: 0.
@@ -31122,7 +36303,7 @@ type AzureSQLSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
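
The new TableOption field lets the sink ask the service to create the target table when it does not exist; per the property description, only 'autoCreate' is currently accepted. A hedged sketch of setting it on the generated type follows; the import path is an assumption, and the marshaled output will also carry whatever discriminator and other fields the generated MarshalJSON emits.

```go
// A hedged sketch, assuming the usual azure-sdk-for-go import path for
// datafactory 2018-06-01; only the new TableOption field is of interest here.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	sink := datafactory.AzureSQLSink{
		// Per the field comment, "autoCreate" is the only supported value for now.
		TableOption: "autoCreate",
	}
	// json.Marshal picks up the generated MarshalJSON, which writes the field
	// under the "tableOption" key shown in the diff above.
	b, err := json.Marshal(sink)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(b))
}
```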
@@ -31145,6 +36326,9 @@ func (ass AzureSQLSink) MarshalJSON() ([]byte, error) {
if ass.StoredProcedureTableTypeParameterName != nil {
objectMap["storedProcedureTableTypeParameterName"] = ass.StoredProcedureTableTypeParameterName
}
+ if ass.TableOption != nil {
+ objectMap["tableOption"] = ass.TableOption
+ }
if ass.WriteBatchSize != nil {
objectMap["writeBatchSize"] = ass.WriteBatchSize
}
@@ -31289,6 +36473,11 @@ func (ass AzureSQLSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for AzureSQLSink.
+func (ass AzureSQLSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for AzureSQLSink.
func (ass AzureSQLSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -31304,11 +36493,21 @@ func (ass AzureSQLSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, b
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for AzureSQLSink.
+func (ass AzureSQLSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for AzureSQLSink.
func (ass AzureSQLSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for AzureSQLSink.
+func (ass AzureSQLSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for AzureSQLSink.
func (ass AzureSQLSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -31378,6 +36577,15 @@ func (ass *AzureSQLSink) UnmarshalJSON(body []byte) error {
}
ass.StoredProcedureTableTypeParameterName = storedProcedureTableTypeParameterName
}
+ case "tableOption":
+ if v != nil {
+ var tableOption interface{}
+ err = json.Unmarshal(*v, &tableOption)
+ if err != nil {
+ return err
+ }
+ ass.TableOption = tableOption
+ }
default:
if v != nil {
var additionalProperties interface{}
@@ -31468,7 +36676,7 @@ type AzureSQLSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -31601,6 +36809,11 @@ func (ass AzureSQLSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for AzureSQLSource.
+func (ass AzureSQLSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for AzureSQLSource.
func (ass AzureSQLSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -31821,6 +37034,11 @@ func (ass AzureSQLSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for AzureSQLSource.
+func (ass AzureSQLSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for AzureSQLSource.
func (ass AzureSQLSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -31896,6 +37114,11 @@ func (ass AzureSQLSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for AzureSQLSource.
+func (ass AzureSQLSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for AzureSQLSource.
func (ass AzureSQLSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -31906,6 +37129,11 @@ func (ass AzureSQLSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for AzureSQLSource.
+func (ass AzureSQLSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for AzureSQLSource.
func (ass AzureSQLSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -32035,7 +37263,7 @@ type AzureSQLTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -32171,6 +37399,11 @@ func (astd AzureSQLTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzureSQLTableDataset.
+func (astd AzureSQLTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AzureSQLTableDataset.
func (astd AzureSQLTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -32296,6 +37529,16 @@ func (astd AzureSQLTableDataset) AsSapCloudForCustomerResourceDataset() (*SapClo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AzureSQLTableDataset.
+func (astd AzureSQLTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzureSQLTableDataset.
+func (astd AzureSQLTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzureSQLTableDataset.
func (astd AzureSQLTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -32336,6 +37579,16 @@ func (astd AzureSQLTableDataset) AsRelationalTableDataset() (*RelationalTableDat
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AzureSQLTableDataset.
+func (astd AzureSQLTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzureSQLTableDataset.
+func (astd AzureSQLTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AzureSQLTableDataset.
func (astd AzureSQLTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -32451,6 +37704,11 @@ func (astd AzureSQLTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AzureSQLTableDataset.
+func (astd AzureSQLTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AzureSQLTableDataset.
func (astd AzureSQLTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -32461,6 +37719,11 @@ func (astd AzureSQLTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AzureSQLTableDataset.
+func (astd AzureSQLTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AzureSQLTableDataset.
func (astd AzureSQLTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -32586,8 +37849,12 @@ func (astd *AzureSQLTableDataset) UnmarshalJSON(body []byte) error {
// AzureSQLTableDatasetTypeProperties azure SQL dataset properties.
type AzureSQLTableDatasetTypeProperties struct {
- // TableName - The table name of the Azure SQL database. Type: string (or Expression with resultType string).
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
TableName interface{} `json:"tableName,omitempty"`
+ // Schema - The schema name of the Azure SQL database. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+ // Table - The table name of the Azure SQL database. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
}
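// Minimal sketch (assumed caller code, reusing the datafactory import from the earlier
// sketch): the retiring TableName gives way to separate Schema and Table values, each
// either a literal string or an Expression with resultType string. The names below are
// hypothetical example values.
func newAzureSQLTableProperties() datafactory.AzureSQLTableDatasetTypeProperties {
	return datafactory.AzureSQLTableDatasetTypeProperties{
		Schema: "dbo",         // schema name of the Azure SQL database
		Table:  "SalesOrders", // table name within that schema
	}
}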
// AzureStorageLinkedService the storage account linked service.
@@ -32604,7 +37871,7 @@ type AzureStorageLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -32756,6 +38023,11 @@ func (asls AzureStorageLinkedService) AsMarketoLinkedService() (*MarketoLinkedSe
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureStorageLinkedService.
+func (asls AzureStorageLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureStorageLinkedService.
func (asls AzureStorageLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -33211,7 +38483,7 @@ type AzureTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -33347,6 +38619,11 @@ func (atd AzureTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bo
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for AzureTableDataset.
+func (atd AzureTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for AzureTableDataset.
func (atd AzureTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -33472,6 +38749,16 @@ func (atd AzureTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudFo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for AzureTableDataset.
+func (atd AzureTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for AzureTableDataset.
+func (atd AzureTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for AzureTableDataset.
func (atd AzureTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -33512,6 +38799,16 @@ func (atd AzureTableDataset) AsRelationalTableDataset() (*RelationalTableDataset
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for AzureTableDataset.
+func (atd AzureTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for AzureTableDataset.
+func (atd AzureTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for AzureTableDataset.
func (atd AzureTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -33627,6 +38924,11 @@ func (atd AzureTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for AzureTableDataset.
+func (atd AzureTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for AzureTableDataset.
func (atd AzureTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -33637,6 +38939,11 @@ func (atd AzureTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for AzureTableDataset.
+func (atd AzureTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for AzureTableDataset.
func (atd AzureTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -33788,7 +39095,7 @@ type AzureTableSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -33952,6 +39259,11 @@ func (ats AzureTableSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for AzureTableSink.
+func (ats AzureTableSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for AzureTableSink.
func (ats AzureTableSink) AsAzureTableSink() (*AzureTableSink, bool) {
return &ats, true
@@ -33967,11 +39279,21 @@ func (ats AzureTableSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink,
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for AzureTableSink.
+func (ats AzureTableSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for AzureTableSink.
func (ats AzureTableSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for AzureTableSink.
+func (ats AzureTableSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for AzureTableSink.
func (ats AzureTableSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -34118,7 +39440,7 @@ type AzureTableSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -34245,6 +39567,11 @@ func (ats AzureTableSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for AzureTableSource.
+func (ats AzureTableSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for AzureTableSource.
func (ats AzureTableSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -34465,6 +39792,11 @@ func (ats AzureTableSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for AzureTableSource.
+func (ats AzureTableSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for AzureTableSource.
func (ats AzureTableSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -34540,6 +39872,11 @@ func (ats AzureTableSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for AzureTableSource.
+func (ats AzureTableSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for AzureTableSource.
func (ats AzureTableSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -34550,6 +39887,11 @@ func (ats AzureTableSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for AzureTableSource.
+func (ats AzureTableSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for AzureTableSource.
func (ats AzureTableSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -34655,7 +39997,7 @@ type AzureTableStorageLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -34807,6 +40149,11 @@ func (atsls AzureTableStorageLinkedService) AsMarketoLinkedService() (*MarketoLi
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for AzureTableStorageLinkedService.
+func (atsls AzureTableStorageLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for AzureTableStorageLinkedService.
func (atsls AzureTableStorageLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -35248,7 +40595,7 @@ type BinaryDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -35384,6 +40731,11 @@ func (bd BinaryDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool) {
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for BinaryDataset.
+func (bd BinaryDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for BinaryDataset.
func (bd BinaryDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -35509,6 +40861,16 @@ func (bd BinaryDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCust
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for BinaryDataset.
+func (bd BinaryDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for BinaryDataset.
+func (bd BinaryDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for BinaryDataset.
func (bd BinaryDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -35549,6 +40911,16 @@ func (bd BinaryDataset) AsRelationalTableDataset() (*RelationalTableDataset, boo
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for BinaryDataset.
+func (bd BinaryDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for BinaryDataset.
+func (bd BinaryDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for BinaryDataset.
func (bd BinaryDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -35664,6 +41036,11 @@ func (bd BinaryDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return &bd, true
}
+// AsJSONDataset is the BasicDataset implementation for BinaryDataset.
+func (bd BinaryDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for BinaryDataset.
func (bd BinaryDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -35674,6 +41051,11 @@ func (bd BinaryDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for BinaryDataset.
+func (bd BinaryDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for BinaryDataset.
func (bd BinaryDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -35840,7 +41222,7 @@ func (bdtp *BinaryDatasetTypeProperties) UnmarshalJSON(body []byte) error {
// BinarySink a copy activity Binary sink.
type BinarySink struct {
// StoreSettings - Binary store settings.
- StoreSettings *StoreReadSettings `json:"storeSettings,omitempty"`
+ StoreSettings BasicStoreWriteSettings `json:"storeSettings,omitempty"`
 // AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// WriteBatchSize - Write batch size. Type: integer (or Expression with resultType integer), minimum: 0.
@@ -35853,7 +41235,7 @@ type BinarySink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -35861,9 +41243,7 @@ type BinarySink struct {
func (bs BinarySink) MarshalJSON() ([]byte, error) {
bs.Type = TypeBinarySink
objectMap := make(map[string]interface{})
- if bs.StoreSettings != nil {
- objectMap["storeSettings"] = bs.StoreSettings
- }
+ objectMap["storeSettings"] = bs.StoreSettings
if bs.WriteBatchSize != nil {
objectMap["writeBatchSize"] = bs.WriteBatchSize
}
@@ -36008,6 +41388,11 @@ func (bs BinarySink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for BinarySink.
+func (bs BinarySink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for BinarySink.
func (bs BinarySink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -36023,11 +41408,21 @@ func (bs BinarySink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bool
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for BinarySink.
+func (bs BinarySink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for BinarySink.
func (bs BinarySink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for BinarySink.
+func (bs BinarySink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for BinarySink.
func (bs BinarySink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -36054,12 +41449,11 @@ func (bs *BinarySink) UnmarshalJSON(body []byte) error {
switch k {
case "storeSettings":
if v != nil {
- var storeSettings StoreReadSettings
- err = json.Unmarshal(*v, &storeSettings)
+ storeSettings, err := unmarshalBasicStoreWriteSettings(*v)
if err != nil {
return err
}
- bs.StoreSettings = &storeSettings
+ bs.StoreSettings = storeSettings
}
default:
if v != nil {
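// Sketch of the effect of this change (assumed caller code; needs encoding/json and the
// datafactory import from the earlier sketch): storeSettings is now the polymorphic
// BasicStoreWriteSettings interface, so BinarySink.UnmarshalJSON routes the payload
// through unmarshalBasicStoreWriteSettings and callers get back a concrete
// write-settings type instead of the old *StoreReadSettings pointer.
func decodeBinarySinkStoreSettings(raw []byte) (datafactory.BasicStoreWriteSettings, error) {
	var sink datafactory.BinarySink
	if err := json.Unmarshal(raw, &sink); err != nil {
		return nil, err
	}
	return sink.StoreSettings, nil // inspect the concrete type with a type switch
}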
@@ -36145,7 +41539,7 @@ type BinarySource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -36269,6 +41663,11 @@ func (bs BinarySource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for BinarySource.
+func (bs BinarySource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for BinarySource.
func (bs BinarySource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -36489,6 +41888,11 @@ func (bs BinarySource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for BinarySource.
+func (bs BinarySource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for BinarySource.
func (bs BinarySource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -36564,6 +41968,11 @@ func (bs BinarySource) AsBinarySource() (*BinarySource, bool) {
return &bs, true
}
+// AsJSONSource is the BasicCopySource implementation for BinarySource.
+func (bs BinarySource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for BinarySource.
func (bs BinarySource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -36574,6 +41983,11 @@ func (bs BinarySource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for BinarySource.
+func (bs BinarySource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for BinarySource.
func (bs BinarySource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -36666,7 +42080,7 @@ type BlobEventsTrigger struct {
AdditionalProperties map[string]interface{} `json:""`
// Description - Trigger description.
Description *string `json:"description,omitempty"`
- // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'Started', 'Stopped', 'Disabled'
+ // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'TriggerRuntimeStateStarted', 'TriggerRuntimeStateStopped', 'TriggerRuntimeStateDisabled'
RuntimeState TriggerRuntimeState `json:"runtimeState,omitempty"`
// Annotations - List of tags that can be used for describing the trigger.
Annotations *[]interface{} `json:"annotations,omitempty"`
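// Note for callers (assumed caller code): the TriggerRuntimeState constants were
// prefixed in this release, so comparisons against the old names need updating, e.g.
//	if trigger.RuntimeState == datafactory.Started { ... }
// becomes
//	if trigger.RuntimeState == datafactory.TriggerRuntimeStateStarted { ... }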
@@ -36859,7 +42273,7 @@ type BlobSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -37023,6 +42437,11 @@ func (bs BlobSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for BlobSink.
+func (bs BlobSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for BlobSink.
func (bs BlobSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -37038,11 +42457,21 @@ func (bs BlobSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bool)
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for BlobSink.
+func (bs BlobSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for BlobSink.
func (bs BlobSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for BlobSink.
+func (bs BlobSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for BlobSink.
func (bs BlobSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -37191,7 +42620,7 @@ type BlobSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -37321,6 +42750,11 @@ func (bs BlobSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for BlobSource.
+func (bs BlobSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for BlobSource.
func (bs BlobSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -37541,6 +42975,11 @@ func (bs BlobSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for BlobSource.
+func (bs BlobSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for BlobSource.
func (bs BlobSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -37616,6 +43055,11 @@ func (bs BlobSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for BlobSource.
+func (bs BlobSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for BlobSource.
func (bs BlobSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -37626,6 +43070,11 @@ func (bs BlobSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for BlobSource.
+func (bs BlobSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for BlobSource.
func (bs BlobSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -37736,7 +43185,7 @@ type BlobTrigger struct {
AdditionalProperties map[string]interface{} `json:""`
// Description - Trigger description.
Description *string `json:"description,omitempty"`
- // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'Started', 'Stopped', 'Disabled'
+ // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'TriggerRuntimeStateStarted', 'TriggerRuntimeStateStopped', 'TriggerRuntimeStateDisabled'
RuntimeState TriggerRuntimeState `json:"runtimeState,omitempty"`
// Annotations - List of tags that can be used for describing the trigger.
Annotations *[]interface{} `json:"annotations,omitempty"`
@@ -37919,7 +43368,7 @@ type CassandraLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -38071,6 +43520,11 @@ func (cls CassandraLinkedService) AsMarketoLinkedService() (*MarketoLinkedServic
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for CassandraLinkedService.
+func (cls CassandraLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for CassandraLinkedService.
func (cls CassandraLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -38590,7 +44044,7 @@ type CassandraSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -38717,6 +44171,11 @@ func (cs CassandraSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for CassandraSource.
+func (cs CassandraSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for CassandraSource.
func (cs CassandraSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -38937,6 +44396,11 @@ func (cs CassandraSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for CassandraSource.
+func (cs CassandraSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for CassandraSource.
func (cs CassandraSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -39012,6 +44476,11 @@ func (cs CassandraSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for CassandraSource.
+func (cs CassandraSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for CassandraSource.
func (cs CassandraSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -39022,6 +44491,11 @@ func (cs CassandraSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for CassandraSource.
+func (cs CassandraSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for CassandraSource.
func (cs CassandraSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -39133,7 +44607,7 @@ type CassandraTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -39269,6 +44743,11 @@ func (ctd CassandraTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for CassandraTableDataset.
+func (ctd CassandraTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for CassandraTableDataset.
func (ctd CassandraTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -39394,6 +44873,16 @@ func (ctd CassandraTableDataset) AsSapCloudForCustomerResourceDataset() (*SapClo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for CassandraTableDataset.
+func (ctd CassandraTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for CassandraTableDataset.
+func (ctd CassandraTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for CassandraTableDataset.
func (ctd CassandraTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -39434,6 +44923,16 @@ func (ctd CassandraTableDataset) AsRelationalTableDataset() (*RelationalTableDat
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for CassandraTableDataset.
+func (ctd CassandraTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for CassandraTableDataset.
+func (ctd CassandraTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for CassandraTableDataset.
func (ctd CassandraTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -39549,6 +45048,11 @@ func (ctd CassandraTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for CassandraTableDataset.
+func (ctd CassandraTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for CassandraTableDataset.
func (ctd CassandraTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -39559,6 +45063,11 @@ func (ctd CassandraTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for CassandraTableDataset.
+func (ctd CassandraTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for CassandraTableDataset.
func (ctd CassandraTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -39761,7 +45270,7 @@ type CommonDataServiceForAppsEntityDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -39897,6 +45406,11 @@ func (cdsfaed CommonDataServiceForAppsEntityDataset) AsMarketoObjectDataset() (*
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
+func (cdsfaed CommonDataServiceForAppsEntityDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
func (cdsfaed CommonDataServiceForAppsEntityDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -40022,6 +45536,16 @@ func (cdsfaed CommonDataServiceForAppsEntityDataset) AsSapCloudForCustomerResour
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
+func (cdsfaed CommonDataServiceForAppsEntityDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
+func (cdsfaed CommonDataServiceForAppsEntityDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
func (cdsfaed CommonDataServiceForAppsEntityDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -40062,6 +45586,16 @@ func (cdsfaed CommonDataServiceForAppsEntityDataset) AsRelationalTableDataset()
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
+func (cdsfaed CommonDataServiceForAppsEntityDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
+func (cdsfaed CommonDataServiceForAppsEntityDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
func (cdsfaed CommonDataServiceForAppsEntityDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -40177,6 +45711,11 @@ func (cdsfaed CommonDataServiceForAppsEntityDataset) AsBinaryDataset() (*BinaryD
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
+func (cdsfaed CommonDataServiceForAppsEntityDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
func (cdsfaed CommonDataServiceForAppsEntityDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -40187,6 +45726,11 @@ func (cdsfaed CommonDataServiceForAppsEntityDataset) AsParquetDataset() (*Parque
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
+func (cdsfaed CommonDataServiceForAppsEntityDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for CommonDataServiceForAppsEntityDataset.
func (cdsfaed CommonDataServiceForAppsEntityDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -40331,7 +45875,7 @@ type CommonDataServiceForAppsLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -40483,6 +46027,11 @@ func (cdsfals CommonDataServiceForAppsLinkedService) AsMarketoLinkedService() (*
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for CommonDataServiceForAppsLinkedService.
+func (cdsfals CommonDataServiceForAppsLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for CommonDataServiceForAppsLinkedService.
func (cdsfals CommonDataServiceForAppsLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -41040,7 +46589,7 @@ type CommonDataServiceForAppsSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -41198,6 +46747,11 @@ func (cdsfas CommonDataServiceForAppsSink) AsParquetSink() (*ParquetSink, bool)
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for CommonDataServiceForAppsSink.
+func (cdsfas CommonDataServiceForAppsSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for CommonDataServiceForAppsSink.
func (cdsfas CommonDataServiceForAppsSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -41213,11 +46767,21 @@ func (cdsfas CommonDataServiceForAppsSink) AsSapCloudForCustomerSink() (*SapClou
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for CommonDataServiceForAppsSink.
+func (cdsfas CommonDataServiceForAppsSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for CommonDataServiceForAppsSink.
func (cdsfas CommonDataServiceForAppsSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for CommonDataServiceForAppsSink.
+func (cdsfas CommonDataServiceForAppsSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for CommonDataServiceForAppsSink.
func (cdsfas CommonDataServiceForAppsSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -41344,7 +46908,7 @@ type CommonDataServiceForAppsSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -41468,6 +47032,11 @@ func (cdsfas CommonDataServiceForAppsSource) AsMarketoSource() (*MarketoSource,
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for CommonDataServiceForAppsSource.
+func (cdsfas CommonDataServiceForAppsSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for CommonDataServiceForAppsSource.
func (cdsfas CommonDataServiceForAppsSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -41688,6 +47257,11 @@ func (cdsfas CommonDataServiceForAppsSource) AsODataSource() (*ODataSource, bool
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for CommonDataServiceForAppsSource.
+func (cdsfas CommonDataServiceForAppsSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for CommonDataServiceForAppsSource.
func (cdsfas CommonDataServiceForAppsSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -41763,6 +47337,11 @@ func (cdsfas CommonDataServiceForAppsSource) AsBinarySource() (*BinarySource, bo
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for CommonDataServiceForAppsSource.
+func (cdsfas CommonDataServiceForAppsSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for CommonDataServiceForAppsSource.
func (cdsfas CommonDataServiceForAppsSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -41773,6 +47352,11 @@ func (cdsfas CommonDataServiceForAppsSource) AsParquetSource() (*ParquetSource,
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for CommonDataServiceForAppsSource.
+func (cdsfas CommonDataServiceForAppsSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for CommonDataServiceForAppsSource.
func (cdsfas CommonDataServiceForAppsSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -41869,7 +47453,7 @@ type ConcurLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -42021,6 +47605,11 @@ func (cls ConcurLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for ConcurLinkedService.
+func (cls ConcurLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for ConcurLinkedService.
func (cls ConcurLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -42557,7 +48146,7 @@ type ConcurObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -42693,6 +48282,11 @@ func (cod ConcurObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for ConcurObjectDataset.
+func (cod ConcurObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for ConcurObjectDataset.
func (cod ConcurObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -42818,6 +48412,16 @@ func (cod ConcurObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloud
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for ConcurObjectDataset.
+func (cod ConcurObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for ConcurObjectDataset.
+func (cod ConcurObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for ConcurObjectDataset.
func (cod ConcurObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -42858,6 +48462,16 @@ func (cod ConcurObjectDataset) AsRelationalTableDataset() (*RelationalTableDatas
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for ConcurObjectDataset.
+func (cod ConcurObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for ConcurObjectDataset.
+func (cod ConcurObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for ConcurObjectDataset.
func (cod ConcurObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -42973,6 +48587,11 @@ func (cod ConcurObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for ConcurObjectDataset.
+func (cod ConcurObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for ConcurObjectDataset.
func (cod ConcurObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -42983,6 +48602,11 @@ func (cod ConcurObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for ConcurObjectDataset.
+func (cod ConcurObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for ConcurObjectDataset.
func (cod ConcurObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -43118,7 +48742,7 @@ type ConcurSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -43242,6 +48866,11 @@ func (cs ConcurSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for ConcurSource.
+func (cs ConcurSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for ConcurSource.
func (cs ConcurSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -43462,6 +49091,11 @@ func (cs ConcurSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for ConcurSource.
+func (cs ConcurSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for ConcurSource.
func (cs ConcurSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -43537,6 +49171,11 @@ func (cs ConcurSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for ConcurSource.
+func (cs ConcurSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for ConcurSource.
func (cs ConcurSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -43547,6 +49186,11 @@ func (cs ConcurSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for ConcurSource.
+func (cs ConcurSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for ConcurSource.
func (cs ConcurSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -44547,10 +50191,13 @@ type BasicCopySink interface {
AsBlobSink() (*BlobSink, bool)
AsBinarySink() (*BinarySink, bool)
AsParquetSink() (*ParquetSink, bool)
+ AsAvroSink() (*AvroSink, bool)
AsAzureTableSink() (*AzureTableSink, bool)
AsAzureQueueSink() (*AzureQueueSink, bool)
AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bool)
+ AsAzureMySQLSink() (*AzureMySQLSink, bool)
AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool)
+ AsJSONSink() (*JSONSink, bool)
AsDelimitedTextSink() (*DelimitedTextSink, bool)
AsCopySink() (*CopySink, bool)
}
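
The `BasicCopySink` interface above now lists `AsAvroSink`, `AsAzureMySQLSink`, and `AsJSONSink` alongside the existing accessors; each follows the generated `(*ConcreteType, bool)` type-assertion pattern visible throughout this diff. A minimal usage sketch under stated assumptions (the `describeSink` helper and the 2018-06-01 import path are illustrative, not part of this diff):

```go
package sample

import (
	"fmt"

	// Import path assumed for the datafactory 2018-06-01 package this diff touches.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// describeSink is an illustrative helper: given any BasicCopySink (for example
// one produced by unmarshalling a copy activity), the generated As*Sink
// accessors attempt the type assertion and report whether it succeeded.
func describeSink(sink datafactory.BasicCopySink) string {
	if avro, ok := sink.AsAvroSink(); ok && avro != nil {
		return "Avro sink (new in this release)"
	}
	if js, ok := sink.AsJSONSink(); ok && js != nil {
		return "JSON sink (new in this release)"
	}
	if mysql, ok := sink.AsAzureMySQLSink(); ok && mysql != nil {
		return "Azure MySQL sink (new in this release)"
	}
	return fmt.Sprintf("other sink: %T", sink)
}
```
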
@@ -44569,7 +50216,7 @@ type CopySink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -44677,6 +50324,10 @@ func unmarshalBasicCopySink(body []byte) (BasicCopySink, error) {
var ps ParquetSink
err := json.Unmarshal(body, &ps)
return ps, err
+ case string(TypeAvroSink):
+ var as AvroSink
+ err := json.Unmarshal(body, &as)
+ return as, err
case string(TypeAzureTableSink):
var ats AzureTableSink
err := json.Unmarshal(body, &ats)
@@ -44689,10 +50340,18 @@ func unmarshalBasicCopySink(body []byte) (BasicCopySink, error) {
var scfcs SapCloudForCustomerSink
err := json.Unmarshal(body, &scfcs)
return scfcs, err
+ case string(TypeAzureMySQLSink):
+ var amss AzureMySQLSink
+ err := json.Unmarshal(body, &amss)
+ return amss, err
case string(TypeAzurePostgreSQLSink):
var apss AzurePostgreSQLSink
err := json.Unmarshal(body, &apss)
return apss, err
+ case string(TypeJSONSink):
+ var js JSONSink
+ err := json.Unmarshal(body, &js)
+ return js, err
case string(TypeDelimitedTextSink):
var dts DelimitedTextSink
err := json.Unmarshal(body, &dts)
@@ -44870,6 +50529,11 @@ func (cs CopySink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for CopySink.
+func (cs CopySink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for CopySink.
func (cs CopySink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -44885,11 +50549,21 @@ func (cs CopySink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bool)
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for CopySink.
+func (cs CopySink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for CopySink.
func (cs CopySink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for CopySink.
+func (cs CopySink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for CopySink.
func (cs CopySink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -45007,6 +50681,7 @@ type BasicCopySource interface {
AsPhoenixSource() (*PhoenixSource, bool)
AsPaypalSource() (*PaypalSource, bool)
AsMarketoSource() (*MarketoSource, bool)
+ AsAzureMariaDBSource() (*AzureMariaDBSource, bool)
AsMariaDBSource() (*MariaDBSource, bool)
AsMagentoSource() (*MagentoSource, bool)
AsJiraSource() (*JiraSource, bool)
@@ -45051,6 +50726,7 @@ type BasicCopySource interface {
AsSalesforceServiceCloudSource() (*SalesforceServiceCloudSource, bool)
AsSalesforceSource() (*SalesforceSource, bool)
AsODataSource() (*ODataSource, bool)
+ AsSapBwSource() (*SapBwSource, bool)
AsSybaseSource() (*SybaseSource, bool)
AsPostgreSQLSource() (*PostgreSQLSource, bool)
AsMySQLSource() (*MySQLSource, bool)
@@ -45066,8 +50742,10 @@ type BasicCopySource interface {
AsBlobSource() (*BlobSource, bool)
AsAzureTableSource() (*AzureTableSource, bool)
AsBinarySource() (*BinarySource, bool)
+ AsJSONSource() (*JSONSource, bool)
AsDelimitedTextSource() (*DelimitedTextSource, bool)
AsParquetSource() (*ParquetSource, bool)
+ AsAvroSource() (*AvroSource, bool)
AsCopySource() (*CopySource, bool)
}
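// NOTE (editorial sketch, not generated code): BasicCopySource gains the same
// discriminator pattern for the new source types (AzureMariaDBSource, SapBwSource,
// JSONSource, AvroSource). A hedged example of probing the concrete type after
// unmarshalBasicCopySource, assuming a payload whose "type" field matches one of
// the new constants:
//
//	src, err := unmarshalBasicCopySource(raw)
//	if err != nil {
//		// handle the JSON error
//	}
//	if avro, ok := src.AsAvroSource(); ok {
//		_ = avro // concrete *AvroSource
//	} else if js, ok := src.AsJSONSource(); ok {
//		_ = js // concrete *JSONSource
//	}
//
// On the CopySource base type each of these helpers returns (nil, false), as the
// implementations below show, while AsCopySource returns (&cs, true).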
@@ -45081,7 +50759,7 @@ type CopySource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -45169,6 +50847,10 @@ func unmarshalBasicCopySource(body []byte) (BasicCopySource, error) {
var ms MarketoSource
err := json.Unmarshal(body, &ms)
return ms, err
+ case string(TypeAzureMariaDBSource):
+ var amds AzureMariaDBSource
+ err := json.Unmarshal(body, &amds)
+ return amds, err
case string(TypeMariaDBSource):
var mds MariaDBSource
err := json.Unmarshal(body, &mds)
@@ -45345,6 +51027,10 @@ func unmarshalBasicCopySource(body []byte) (BasicCopySource, error) {
var ods ODataSource
err := json.Unmarshal(body, &ods)
return ods, err
+ case string(TypeSapBwSource):
+ var sbs SapBwSource
+ err := json.Unmarshal(body, &sbs)
+ return sbs, err
case string(TypeSybaseSource):
var ss SybaseSource
err := json.Unmarshal(body, &ss)
@@ -45405,6 +51091,10 @@ func unmarshalBasicCopySource(body []byte) (BasicCopySource, error) {
var bs BinarySource
err := json.Unmarshal(body, &bs)
return bs, err
+ case string(TypeJSONSource):
+ var js JSONSource
+ err := json.Unmarshal(body, &js)
+ return js, err
case string(TypeDelimitedTextSource):
var dts DelimitedTextSource
err := json.Unmarshal(body, &dts)
@@ -45413,6 +51103,10 @@ func unmarshalBasicCopySource(body []byte) (BasicCopySource, error) {
var ps ParquetSource
err := json.Unmarshal(body, &ps)
return ps, err
+ case string(TypeAvroSource):
+ var as AvroSource
+ err := json.Unmarshal(body, &as)
+ return as, err
default:
var cs CopySource
err := json.Unmarshal(body, &cs)
@@ -45555,6 +51249,11 @@ func (cs CopySource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for CopySource.
+func (cs CopySource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for CopySource.
func (cs CopySource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -45775,6 +51474,11 @@ func (cs CopySource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for CopySource.
+func (cs CopySource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for CopySource.
func (cs CopySource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -45850,6 +51554,11 @@ func (cs CopySource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for CopySource.
+func (cs CopySource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for CopySource.
func (cs CopySource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -45860,6 +51569,11 @@ func (cs CopySource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for CopySource.
+func (cs CopySource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for CopySource.
func (cs CopySource) AsCopySource() (*CopySource, bool) {
return &cs, true
@@ -45947,7 +51661,7 @@ type CosmosDbLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -46099,6 +51813,11 @@ func (cdls CosmosDbLinkedService) AsMarketoLinkedService() (*MarketoLinkedServic
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for CosmosDbLinkedService.
+func (cdls CosmosDbLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for CosmosDbLinkedService.
func (cdls CosmosDbLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -46550,7 +52269,7 @@ type CosmosDbMongoDbAPICollectionDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
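// NOTE (editorial sketch, not generated code): the dataset side follows suit; the
// BasicDataset discriminator list above now includes 'TypeSapBwCube', 'TypeSybaseTable',
// 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeJSON' and 'TypeAvro', and each
// concrete dataset picks up matching AsXxxDataset helpers (added in the hunks below).
// A small sketch, assuming a BasicDataset value obtained from the service:
//
//	func handleDataset(ds BasicDataset) {
//		if avro, ok := ds.AsAvroDataset(); ok {
//			_ = avro // concrete *AvroDataset
//			return
//		}
//		if js, ok := ds.AsJSONDataset(); ok {
//			_ = js // concrete *JSONDataset
//		}
//	}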
@@ -46686,6 +52405,11 @@ func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsMarketoObjectDataset() (*Ma
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
+func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -46811,6 +52535,16 @@ func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsSapCloudForCustomerResource
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
+func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
+func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -46851,6 +52585,16 @@ func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsRelationalTableDataset() (*
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
+func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
+func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -46966,6 +52710,11 @@ func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsBinaryDataset() (*BinaryDat
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
+func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -46976,6 +52725,11 @@ func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsParquetDataset() (*ParquetD
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
+func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for CosmosDbMongoDbAPICollectionDataset.
func (cdmdacd CosmosDbMongoDbAPICollectionDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -47119,7 +52873,7 @@ type CosmosDbMongoDbAPILinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -47271,6 +53025,11 @@ func (cdmdals CosmosDbMongoDbAPILinkedService) AsMarketoLinkedService() (*Market
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for CosmosDbMongoDbAPILinkedService.
+func (cdmdals CosmosDbMongoDbAPILinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for CosmosDbMongoDbAPILinkedService.
func (cdmdals CosmosDbMongoDbAPILinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -47716,7 +53475,7 @@ type CosmosDbMongoDbAPISink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -47871,6 +53630,11 @@ func (cdmdas CosmosDbMongoDbAPISink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for CosmosDbMongoDbAPISink.
+func (cdmdas CosmosDbMongoDbAPISink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for CosmosDbMongoDbAPISink.
func (cdmdas CosmosDbMongoDbAPISink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -47886,11 +53650,21 @@ func (cdmdas CosmosDbMongoDbAPISink) AsSapCloudForCustomerSink() (*SapCloudForCu
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for CosmosDbMongoDbAPISink.
+func (cdmdas CosmosDbMongoDbAPISink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for CosmosDbMongoDbAPISink.
func (cdmdas CosmosDbMongoDbAPISink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for CosmosDbMongoDbAPISink.
+func (cdmdas CosmosDbMongoDbAPISink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for CosmosDbMongoDbAPISink.
func (cdmdas CosmosDbMongoDbAPISink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -48012,7 +53786,7 @@ type CosmosDbMongoDbAPISource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -48142,6 +53916,11 @@ func (cdmdas CosmosDbMongoDbAPISource) AsMarketoSource() (*MarketoSource, bool)
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for CosmosDbMongoDbAPISource.
+func (cdmdas CosmosDbMongoDbAPISource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for CosmosDbMongoDbAPISource.
func (cdmdas CosmosDbMongoDbAPISource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -48362,6 +54141,11 @@ func (cdmdas CosmosDbMongoDbAPISource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for CosmosDbMongoDbAPISource.
+func (cdmdas CosmosDbMongoDbAPISource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for CosmosDbMongoDbAPISource.
func (cdmdas CosmosDbMongoDbAPISource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -48437,6 +54221,11 @@ func (cdmdas CosmosDbMongoDbAPISource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for CosmosDbMongoDbAPISource.
+func (cdmdas CosmosDbMongoDbAPISource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for CosmosDbMongoDbAPISource.
func (cdmdas CosmosDbMongoDbAPISource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -48447,6 +54236,11 @@ func (cdmdas CosmosDbMongoDbAPISource) AsParquetSource() (*ParquetSource, bool)
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for CosmosDbMongoDbAPISource.
+func (cdmdas CosmosDbMongoDbAPISource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for CosmosDbMongoDbAPISource.
func (cdmdas CosmosDbMongoDbAPISource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -48561,7 +54355,7 @@ type CouchbaseLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -48713,6 +54507,11 @@ func (cls CouchbaseLinkedService) AsMarketoLinkedService() (*MarketoLinkedServic
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for CouchbaseLinkedService.
+func (cls CouchbaseLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for CouchbaseLinkedService.
func (cls CouchbaseLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -49156,7 +54955,7 @@ type CouchbaseSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -49280,6 +55079,11 @@ func (cs CouchbaseSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for CouchbaseSource.
+func (cs CouchbaseSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for CouchbaseSource.
func (cs CouchbaseSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -49500,6 +55304,11 @@ func (cs CouchbaseSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for CouchbaseSource.
+func (cs CouchbaseSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for CouchbaseSource.
func (cs CouchbaseSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -49575,6 +55384,11 @@ func (cs CouchbaseSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for CouchbaseSource.
+func (cs CouchbaseSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for CouchbaseSource.
func (cs CouchbaseSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -49585,6 +55399,11 @@ func (cs CouchbaseSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for CouchbaseSource.
+func (cs CouchbaseSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for CouchbaseSource.
func (cs CouchbaseSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -49687,7 +55506,7 @@ type CouchbaseTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -49823,6 +55642,11 @@ func (ctd CouchbaseTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for CouchbaseTableDataset.
+func (ctd CouchbaseTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for CouchbaseTableDataset.
func (ctd CouchbaseTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -49948,6 +55772,16 @@ func (ctd CouchbaseTableDataset) AsSapCloudForCustomerResourceDataset() (*SapClo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for CouchbaseTableDataset.
+func (ctd CouchbaseTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for CouchbaseTableDataset.
+func (ctd CouchbaseTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for CouchbaseTableDataset.
func (ctd CouchbaseTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -49988,6 +55822,16 @@ func (ctd CouchbaseTableDataset) AsRelationalTableDataset() (*RelationalTableDat
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for CouchbaseTableDataset.
+func (ctd CouchbaseTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for CouchbaseTableDataset.
+func (ctd CouchbaseTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for CouchbaseTableDataset.
func (ctd CouchbaseTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -50103,6 +55947,11 @@ func (ctd CouchbaseTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for CouchbaseTableDataset.
+func (ctd CouchbaseTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for CouchbaseTableDataset.
func (ctd CouchbaseTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -50113,6 +55962,11 @@ func (ctd CouchbaseTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for CouchbaseTableDataset.
+func (ctd CouchbaseTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for CouchbaseTableDataset.
func (ctd CouchbaseTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -50663,7 +56517,7 @@ type CustomDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -50799,6 +56653,11 @@ func (cd CustomDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool) {
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for CustomDataset.
+func (cd CustomDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for CustomDataset.
func (cd CustomDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -50924,6 +56783,16 @@ func (cd CustomDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCust
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for CustomDataset.
+func (cd CustomDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for CustomDataset.
+func (cd CustomDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for CustomDataset.
func (cd CustomDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -50964,6 +56833,16 @@ func (cd CustomDataset) AsRelationalTableDataset() (*RelationalTableDataset, boo
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for CustomDataset.
+func (cd CustomDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for CustomDataset.
+func (cd CustomDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for CustomDataset.
func (cd CustomDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -51079,6 +56958,11 @@ func (cd CustomDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for CustomDataset.
+func (cd CustomDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for CustomDataset.
func (cd CustomDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -51089,6 +56973,11 @@ func (cd CustomDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for CustomDataset.
+func (cd CustomDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for CustomDataset.
func (cd CustomDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -51226,7 +57115,7 @@ type CustomDataSourceLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -51378,6 +57267,11 @@ func (cdsls CustomDataSourceLinkedService) AsMarketoLinkedService() (*MarketoLin
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for CustomDataSourceLinkedService.
+func (cdsls CustomDataSourceLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for CustomDataSourceLinkedService.
func (cdsls CustomDataSourceLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -53271,6 +59165,7 @@ type BasicDataset interface {
AsPhoenixObjectDataset() (*PhoenixObjectDataset, bool)
AsPaypalObjectDataset() (*PaypalObjectDataset, bool)
AsMarketoObjectDataset() (*MarketoObjectDataset, bool)
+ AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool)
AsMariaDBTableDataset() (*MariaDBTableDataset, bool)
AsMagentoObjectDataset() (*MagentoObjectDataset, bool)
AsJiraObjectDataset() (*JiraObjectDataset, bool)
@@ -53296,6 +59191,8 @@ type BasicDataset interface {
AsSapHanaTableDataset() (*SapHanaTableDataset, bool)
AsSapEccResourceDataset() (*SapEccResourceDataset, bool)
AsSapCloudForCustomerResourceDataset() (*SapCloudForCustomerResourceDataset, bool)
+ AsSapBwCubeDataset() (*SapBwCubeDataset, bool)
+ AsSybaseTableDataset() (*SybaseTableDataset, bool)
AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool)
AsSalesforceObjectDataset() (*SalesforceObjectDataset, bool)
AsMicrosoftAccessTableDataset() (*MicrosoftAccessTableDataset, bool)
@@ -53304,6 +59201,8 @@ type BasicDataset interface {
AsOdbcTableDataset() (*OdbcTableDataset, bool)
AsInformixTableDataset() (*InformixTableDataset, bool)
AsRelationalTableDataset() (*RelationalTableDataset, bool)
+ AsDb2TableDataset() (*Db2TableDataset, bool)
+ AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool)
AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool)
AsTeradataTableDataset() (*TeradataTableDataset, bool)
AsOracleTableDataset() (*OracleTableDataset, bool)
@@ -53327,8 +59226,10 @@ type BasicDataset interface {
AsAzureTableDataset() (*AzureTableDataset, bool)
AsAzureBlobDataset() (*AzureBlobDataset, bool)
AsBinaryDataset() (*BinaryDataset, bool)
+ AsJSONDataset() (*JSONDataset, bool)
AsDelimitedTextDataset() (*DelimitedTextDataset, bool)
AsParquetDataset() (*ParquetDataset, bool)
+ AsAvroDataset() (*AvroDataset, bool)
AsAmazonS3Dataset() (*AmazonS3Dataset, bool)
AsDataset() (*Dataset, bool)
}
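
Editor's note: the As* helpers listed in this interface are how callers recover a concrete dataset type from a BasicDataset value without a type switch. A minimal sketch follows; the module import path is assumed from this SDK's usual layout (services/datafactory/mgmt/2018-06-01/datafactory) and may need adjusting to your vendored version.

package main

import (
	"fmt"

	datafactory "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// describe branches on the concrete dataset type behind a BasicDataset value.
// Exactly one As* helper returns ok == true for a given concrete type.
func describe(ds datafactory.BasicDataset) {
	if d2, ok := ds.AsDb2TableDataset(); ok {
		fmt.Printf("Db2 table dataset: %+v\n", d2)
		return
	}
	if avro, ok := ds.AsAvroDataset(); ok {
		fmt.Printf("Avro dataset: %+v\n", avro)
		return
	}
	fmt.Println("some other dataset type")
}

func main() {
	// Db2TableDataset's As* methods use value receivers, so a value satisfies BasicDataset.
	describe(datafactory.Db2TableDataset{})
}
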
@@ -53352,7 +59253,7 @@ type Dataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -53440,6 +59341,10 @@ func unmarshalBasicDataset(body []byte) (BasicDataset, error) {
var mod MarketoObjectDataset
err := json.Unmarshal(body, &mod)
return mod, err
+ case string(TypeAzureMariaDBTable):
+ var amdtd AzureMariaDBTableDataset
+ err := json.Unmarshal(body, &amdtd)
+ return amdtd, err
case string(TypeMariaDBTable):
var mdtd MariaDBTableDataset
err := json.Unmarshal(body, &mdtd)
@@ -53540,6 +59445,14 @@ func unmarshalBasicDataset(body []byte) (BasicDataset, error) {
var scfcrd SapCloudForCustomerResourceDataset
err := json.Unmarshal(body, &scfcrd)
return scfcrd, err
+ case string(TypeSapBwCube):
+ var sbcd SapBwCubeDataset
+ err := json.Unmarshal(body, &sbcd)
+ return sbcd, err
+ case string(TypeSybaseTable):
+ var std SybaseTableDataset
+ err := json.Unmarshal(body, &std)
+ return std, err
case string(TypeSalesforceServiceCloudObject):
var sscod SalesforceServiceCloudObjectDataset
err := json.Unmarshal(body, &sscod)
@@ -53572,6 +59485,14 @@ func unmarshalBasicDataset(body []byte) (BasicDataset, error) {
var rtd RelationalTableDataset
err := json.Unmarshal(body, &rtd)
return rtd, err
+ case string(TypeDb2Table):
+ var d2td Db2TableDataset
+ err := json.Unmarshal(body, &d2td)
+ return d2td, err
+ case string(TypeAmazonRedshiftTable):
+ var artd AmazonRedshiftTableDataset
+ err := json.Unmarshal(body, &artd)
+ return artd, err
case string(TypeAzureMySQLTable):
var amstd AzureMySQLTableDataset
err := json.Unmarshal(body, &amstd)
@@ -53664,6 +59585,10 @@ func unmarshalBasicDataset(body []byte) (BasicDataset, error) {
var bd BinaryDataset
err := json.Unmarshal(body, &bd)
return bd, err
+ case string(TypeJSON):
+ var jd JSONDataset
+ err := json.Unmarshal(body, &jd)
+ return jd, err
case string(TypeDelimitedText):
var dtd DelimitedTextDataset
err := json.Unmarshal(body, &dtd)
@@ -53672,6 +59597,10 @@ func unmarshalBasicDataset(body []byte) (BasicDataset, error) {
var pd ParquetDataset
err := json.Unmarshal(body, &pd)
return pd, err
+ case string(TypeAvro):
+ var ad AvroDataset
+ err := json.Unmarshal(body, &ad)
+ return ad, err
case string(TypeAmazonS3Object):
var asd AmazonS3Dataset
err := json.Unmarshal(body, &asd)
@@ -53830,6 +59759,11 @@ func (d Dataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool) {
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for Dataset.
+func (d Dataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for Dataset.
func (d Dataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -53955,6 +59889,16 @@ func (d Dataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCustomerRes
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for Dataset.
+func (d Dataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for Dataset.
+func (d Dataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for Dataset.
func (d Dataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -53995,6 +59939,16 @@ func (d Dataset) AsRelationalTableDataset() (*RelationalTableDataset, bool) {
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for Dataset.
+func (d Dataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for Dataset.
+func (d Dataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for Dataset.
func (d Dataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -54110,6 +60064,11 @@ func (d Dataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for Dataset.
+func (d Dataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for Dataset.
func (d Dataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -54120,6 +60079,11 @@ func (d Dataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for Dataset.
+func (d Dataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for Dataset.
func (d Dataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -55302,7 +61266,7 @@ type Db2LinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -55454,6 +61418,11 @@ func (d2ls Db2LinkedService) AsMarketoLinkedService() (*MarketoLinkedService, bo
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for Db2LinkedService.
+func (d2ls Db2LinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for Db2LinkedService.
func (d2ls Db2LinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -55971,7 +61940,7 @@ type Db2Source struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -56095,6 +62064,11 @@ func (d2s Db2Source) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for Db2Source.
+func (d2s Db2Source) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for Db2Source.
func (d2s Db2Source) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -56315,6 +62289,11 @@ func (d2s Db2Source) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for Db2Source.
+func (d2s Db2Source) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for Db2Source.
func (d2s Db2Source) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -56390,6 +62369,11 @@ func (d2s Db2Source) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for Db2Source.
+func (d2s Db2Source) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for Db2Source.
func (d2s Db2Source) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -56400,6 +62384,11 @@ func (d2s Db2Source) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for Db2Source.
+func (d2s Db2Source) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for Db2Source.
func (d2s Db2Source) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -56482,6 +62471,620 @@ func (d2s *Db2Source) UnmarshalJSON(body []byte) error {
return nil
}
+// Db2TableDataset the Db2 table dataset.
+type Db2TableDataset struct {
+ // Db2TableDatasetTypeProperties - Db2 table dataset properties.
+ *Db2TableDatasetTypeProperties `json:"typeProperties,omitempty"`
+	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Description - Dataset description.
+ Description *string `json:"description,omitempty"`
+ // Structure - Columns that define the structure of the dataset. Type: array (or Expression with resultType array), itemType: DatasetDataElement.
+ Structure interface{} `json:"structure,omitempty"`
+ // Schema - Columns that define the physical type schema of the dataset. Type: array (or Expression with resultType array), itemType: DatasetSchemaDataElement.
+ Schema interface{} `json:"schema,omitempty"`
+ // LinkedServiceName - Linked service reference.
+ LinkedServiceName *LinkedServiceReference `json:"linkedServiceName,omitempty"`
+ // Parameters - Parameters for dataset.
+ Parameters map[string]*ParameterSpecification `json:"parameters"`
+ // Annotations - List of tags that can be used for describing the Dataset.
+ Annotations *[]interface{} `json:"annotations,omitempty"`
+ // Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
+ Folder *DatasetFolder `json:"folder,omitempty"`
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
+ Type TypeBasicDataset `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Db2TableDataset.
+func (d2td Db2TableDataset) MarshalJSON() ([]byte, error) {
+ d2td.Type = TypeDb2Table
+ objectMap := make(map[string]interface{})
+ if d2td.Db2TableDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = d2td.Db2TableDatasetTypeProperties
+ }
+ if d2td.Description != nil {
+ objectMap["description"] = d2td.Description
+ }
+ if d2td.Structure != nil {
+ objectMap["structure"] = d2td.Structure
+ }
+ if d2td.Schema != nil {
+ objectMap["schema"] = d2td.Schema
+ }
+ if d2td.LinkedServiceName != nil {
+ objectMap["linkedServiceName"] = d2td.LinkedServiceName
+ }
+ if d2td.Parameters != nil {
+ objectMap["parameters"] = d2td.Parameters
+ }
+ if d2td.Annotations != nil {
+ objectMap["annotations"] = d2td.Annotations
+ }
+ if d2td.Folder != nil {
+ objectMap["folder"] = d2td.Folder
+ }
+ if d2td.Type != "" {
+ objectMap["type"] = d2td.Type
+ }
+ for k, v := range d2td.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
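
Editor's note: MarshalJSON above stamps the discriminator itself, so callers never set Type by hand. A minimal marshaling sketch follows, under the same assumed import path; the emitted "type" value is expected to be "Db2Table" per the new TypeDb2Table constant, and the properties land under "typeProperties".

package main

import (
	"encoding/json"
	"fmt"

	datafactory "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	desc := "sample Db2 table dataset"
	ds := datafactory.Db2TableDataset{
		Description: &desc,
		Db2TableDatasetTypeProperties: &datafactory.Db2TableDatasetTypeProperties{
			Schema: "DSN8",
			Table:  "EMP",
		},
	}

	// The custom marshaler forces Type = TypeDb2Table and nests the
	// properties under the "typeProperties" key.
	b, err := json.Marshal(ds)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // expect "type":"Db2Table" alongside description and typeProperties
}
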
+// AsGoogleAdWordsObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsGoogleAdWordsObjectDataset() (*GoogleAdWordsObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzureDataExplorerTableDataset() (*AzureDataExplorerTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsOracleServiceCloudObjectDataset() (*OracleServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXResourceDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsDynamicsAXResourceDataset() (*DynamicsAXResourceDataset, bool) {
+ return nil, false
+}
+
+// AsResponsysObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsResponsysObjectDataset() (*ResponsysObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSalesforceMarketingCloudObjectDataset() (*SalesforceMarketingCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsVerticaTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsVerticaTableDataset() (*VerticaTableDataset, bool) {
+ return nil, false
+}
+
+// AsNetezzaTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsNetezzaTableDataset() (*NetezzaTableDataset, bool) {
+ return nil, false
+}
+
+// AsZohoObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsZohoObjectDataset() (*ZohoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsXeroObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsXeroObjectDataset() (*XeroObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSquareObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSquareObjectDataset() (*SquareObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSparkObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSparkObjectDataset() (*SparkObjectDataset, bool) {
+ return nil, false
+}
+
+// AsShopifyObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsShopifyObjectDataset() (*ShopifyObjectDataset, bool) {
+ return nil, false
+}
+
+// AsServiceNowObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsServiceNowObjectDataset() (*ServiceNowObjectDataset, bool) {
+ return nil, false
+}
+
+// AsQuickBooksObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsQuickBooksObjectDataset() (*QuickBooksObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPrestoObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsPrestoObjectDataset() (*PrestoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPhoenixObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsPhoenixObjectDataset() (*PhoenixObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPaypalObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsPaypalObjectDataset() (*PaypalObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMarketoObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMariaDBTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMagentoObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsMagentoObjectDataset() (*MagentoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsJiraObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsJiraObjectDataset() (*JiraObjectDataset, bool) {
+ return nil, false
+}
+
+// AsImpalaObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsImpalaObjectDataset() (*ImpalaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHubspotObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsHubspotObjectDataset() (*HubspotObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHiveObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsHiveObjectDataset() (*HiveObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHBaseObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsHBaseObjectDataset() (*HBaseObjectDataset, bool) {
+ return nil, false
+}
+
+// AsGreenplumTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsGreenplumTableDataset() (*GreenplumTableDataset, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQueryObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsGoogleBigQueryObjectDataset() (*GoogleBigQueryObjectDataset, bool) {
+ return nil, false
+}
+
+// AsEloquaObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsEloquaObjectDataset() (*EloquaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDrillTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsDrillTableDataset() (*DrillTableDataset, bool) {
+ return nil, false
+}
+
+// AsCouchbaseTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsCouchbaseTableDataset() (*CouchbaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsConcurObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsConcurObjectDataset() (*ConcurObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzurePostgreSQLTableDataset() (*AzurePostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAmazonMWSObjectDataset() (*AmazonMWSObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHTTPDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsHTTPDataset() (*HTTPDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSearchIndexDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzureSearchIndexDataset() (*AzureSearchIndexDataset, bool) {
+ return nil, false
+}
+
+// AsWebTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsWebTableDataset() (*WebTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapTableResourceDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSapTableResourceDataset() (*SapTableResourceDataset, bool) {
+ return nil, false
+}
+
+// AsRestResourceDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsRestResourceDataset() (*RestResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSQLServerTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSQLServerTableDataset() (*SQLServerTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSapOpenHubTableDataset() (*SapOpenHubTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapHanaTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSapHanaTableDataset() (*SapHanaTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapEccResourceDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSapEccResourceDataset() (*SapEccResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerResourceDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCustomerResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapBwCubeDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceObjectDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsSalesforceObjectDataset() (*SalesforceObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsMicrosoftAccessTableDataset() (*MicrosoftAccessTableDataset, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsPostgreSQLTableDataset() (*PostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsMySQLTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsMySQLTableDataset() (*MySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsOdbcTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsOdbcTableDataset() (*OdbcTableDataset, bool) {
+ return nil, false
+}
+
+// AsInformixTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsInformixTableDataset() (*InformixTableDataset, bool) {
+ return nil, false
+}
+
+// AsRelationalTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsRelationalTableDataset() (*RelationalTableDataset, bool) {
+ return nil, false
+}
+
+// AsDb2TableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return &d2td, true
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsTeradataTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsTeradataTableDataset() (*TeradataTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsOracleTableDataset() (*OracleTableDataset, bool) {
+ return nil, false
+}
+
+// AsODataResourceDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsODataResourceDataset() (*ODataResourceDataset, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPICollectionDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsCosmosDbMongoDbAPICollectionDataset() (*CosmosDbMongoDbAPICollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2CollectionDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsMongoDbV2CollectionDataset() (*MongoDbV2CollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbCollectionDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsMongoDbCollectionDataset() (*MongoDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsFileShareDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsFileShareDataset() (*FileShareDataset, bool) {
+ return nil, false
+}
+
+// AsOffice365Dataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsOffice365Dataset() (*Office365Dataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzureBlobFSDataset() (*AzureBlobFSDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzureDataLakeStoreDataset() (*AzureDataLakeStoreDataset, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsEntityDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsCommonDataServiceForAppsEntityDataset() (*CommonDataServiceForAppsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmEntityDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsDynamicsCrmEntityDataset() (*DynamicsCrmEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsEntityDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsDynamicsEntityDataset() (*DynamicsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsDocumentDbCollectionDataset() (*DocumentDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsCustomDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsCustomDataset() (*CustomDataset, bool) {
+ return nil, false
+}
+
+// AsCassandraTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsCassandraTableDataset() (*CassandraTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLDWTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzureSQLDWTableDataset() (*AzureSQLDWTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLMITableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzureSQLMITableDataset() (*AzureSQLMITableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzureSQLTableDataset() (*AzureSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureTableDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzureTableDataset() (*AzureTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAzureBlobDataset() (*AzureBlobDataset, bool) {
+ return nil, false
+}
+
+// AsBinaryDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
+ return nil, false
+}
+
+// AsJSONDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
+// AsDelimitedTextDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
+ return nil, false
+}
+
+// AsParquetDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsParquetDataset() (*ParquetDataset, bool) {
+ return nil, false
+}
+
+// AsAvroDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonS3Dataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
+ return nil, false
+}
+
+// AsDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsDataset() (*Dataset, bool) {
+ return nil, false
+}
+
+// AsBasicDataset is the BasicDataset implementation for Db2TableDataset.
+func (d2td Db2TableDataset) AsBasicDataset() (BasicDataset, bool) {
+ return &d2td, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for Db2TableDataset struct.
+func (d2td *Db2TableDataset) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "typeProperties":
+ if v != nil {
+ var db2TableDatasetTypeProperties Db2TableDatasetTypeProperties
+ err = json.Unmarshal(*v, &db2TableDatasetTypeProperties)
+ if err != nil {
+ return err
+ }
+ d2td.Db2TableDatasetTypeProperties = &db2TableDatasetTypeProperties
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if d2td.AdditionalProperties == nil {
+ d2td.AdditionalProperties = make(map[string]interface{})
+ }
+ d2td.AdditionalProperties[k] = additionalProperties
+ }
+ case "description":
+ if v != nil {
+ var description string
+ err = json.Unmarshal(*v, &description)
+ if err != nil {
+ return err
+ }
+ d2td.Description = &description
+ }
+ case "structure":
+ if v != nil {
+ var structure interface{}
+ err = json.Unmarshal(*v, &structure)
+ if err != nil {
+ return err
+ }
+ d2td.Structure = structure
+ }
+ case "schema":
+ if v != nil {
+ var schema interface{}
+ err = json.Unmarshal(*v, &schema)
+ if err != nil {
+ return err
+ }
+ d2td.Schema = schema
+ }
+ case "linkedServiceName":
+ if v != nil {
+ var linkedServiceName LinkedServiceReference
+ err = json.Unmarshal(*v, &linkedServiceName)
+ if err != nil {
+ return err
+ }
+ d2td.LinkedServiceName = &linkedServiceName
+ }
+ case "parameters":
+ if v != nil {
+ var parameters map[string]*ParameterSpecification
+ err = json.Unmarshal(*v, ¶meters)
+ if err != nil {
+ return err
+ }
+ d2td.Parameters = parameters
+ }
+ case "annotations":
+ if v != nil {
+ var annotations []interface{}
+ err = json.Unmarshal(*v, &annotations)
+ if err != nil {
+ return err
+ }
+ d2td.Annotations = &annotations
+ }
+ case "folder":
+ if v != nil {
+ var folder DatasetFolder
+ err = json.Unmarshal(*v, &folder)
+ if err != nil {
+ return err
+ }
+ d2td.Folder = &folder
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicDataset
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ d2td.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// Db2TableDatasetTypeProperties db2 table dataset properties.
+type Db2TableDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Schema - The Db2 schema name. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+ // Table - The Db2 table name. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+}
+
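
Editor's note: per the comment on TableName above, new code should prefer the split schema + table properties. A small sketch under the same assumed import path; the expression object shape is an assumption based on Data Factory's usual expression convention ({"value": ..., "type": "Expression"}).

package main

import (
	"fmt"

	datafactory "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// TableName is being retired; schema + table is the forward-looking shape.
	// Each property accepts either a literal string or an expression object.
	props := datafactory.Db2TableDatasetTypeProperties{
		Schema: "DSN8",
		Table: map[string]interface{}{
			"value": "@dataset().tableName", // hypothetical dataset parameter
			"type":  "Expression",
		},
	}
	fmt.Printf("%+v\n", props)
}
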
// DeleteActivity delete activity.
type DeleteActivity struct {
// DeleteActivityTypeProperties - Delete activity properties.
@@ -56856,7 +63459,7 @@ type DelimitedTextDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -56992,6 +63595,11 @@ func (dtd DelimitedTextDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for DelimitedTextDataset.
+func (dtd DelimitedTextDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for DelimitedTextDataset.
func (dtd DelimitedTextDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -57117,6 +63725,16 @@ func (dtd DelimitedTextDataset) AsSapCloudForCustomerResourceDataset() (*SapClou
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for DelimitedTextDataset.
+func (dtd DelimitedTextDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for DelimitedTextDataset.
+func (dtd DelimitedTextDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for DelimitedTextDataset.
func (dtd DelimitedTextDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -57157,6 +63775,16 @@ func (dtd DelimitedTextDataset) AsRelationalTableDataset() (*RelationalTableData
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for DelimitedTextDataset.
+func (dtd DelimitedTextDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for DelimitedTextDataset.
+func (dtd DelimitedTextDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for DelimitedTextDataset.
func (dtd DelimitedTextDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -57272,6 +63900,11 @@ func (dtd DelimitedTextDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for DelimitedTextDataset.
+func (dtd DelimitedTextDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for DelimitedTextDataset.
func (dtd DelimitedTextDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return &dtd, true
@@ -57282,6 +63915,11 @@ func (dtd DelimitedTextDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for DelimitedTextDataset.
+func (dtd DelimitedTextDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for DelimitedTextDataset.
func (dtd DelimitedTextDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -57501,7 +64139,7 @@ func (dtrs *DelimitedTextReadSettings) UnmarshalJSON(body []byte) error {
// DelimitedTextSink a copy activity DelimitedText sink.
type DelimitedTextSink struct {
// StoreSettings - DelimitedText store settings.
- StoreSettings *StoreWriteSettings `json:"storeSettings,omitempty"`
+ StoreSettings BasicStoreWriteSettings `json:"storeSettings,omitempty"`
// FormatSettings - DelimitedText format settings.
FormatSettings *DelimitedTextWriteSettings `json:"formatSettings,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized this collection
@@ -57516,7 +64154,7 @@ type DelimitedTextSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
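
`DelimitedTextSink.StoreSettings` switching from `*StoreWriteSettings` to the `BasicStoreWriteSettings` interface is one of the breaking changes called out for `datafactory` in this release: callers must now supply a concrete write-settings type rather than the base struct pointer. A minimal sketch of the new shape (the concrete settings kind is left as a parameter, since none of those type names appear in this hunk):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// newDelimitedTextSink shows the caller-side shape after the change:
// StoreSettings accepts any BasicStoreWriteSettings implementation rather
// than a *StoreWriteSettings.
func newDelimitedTextSink(ws datafactory.BasicStoreWriteSettings) datafactory.DelimitedTextSink {
	return datafactory.DelimitedTextSink{
		StoreSettings:  ws,
		FormatSettings: &datafactory.DelimitedTextWriteSettings{},
	}
}

func main() {
	sink := newDelimitedTextSink(nil) // pass a real write-settings value in practice
	b, _ := sink.MarshalJSON()
	fmt.Println(string(b))
}
```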
@@ -57524,9 +64162,7 @@ type DelimitedTextSink struct {
func (dts DelimitedTextSink) MarshalJSON() ([]byte, error) {
dts.Type = TypeDelimitedTextSink
objectMap := make(map[string]interface{})
- if dts.StoreSettings != nil {
- objectMap["storeSettings"] = dts.StoreSettings
- }
+ objectMap["storeSettings"] = dts.StoreSettings
if dts.FormatSettings != nil {
objectMap["formatSettings"] = dts.FormatSettings
}
@@ -57674,6 +64310,11 @@ func (dts DelimitedTextSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for DelimitedTextSink.
+func (dts DelimitedTextSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for DelimitedTextSink.
func (dts DelimitedTextSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -57689,11 +64330,21 @@ func (dts DelimitedTextSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSi
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for DelimitedTextSink.
+func (dts DelimitedTextSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for DelimitedTextSink.
func (dts DelimitedTextSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for DelimitedTextSink.
+func (dts DelimitedTextSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for DelimitedTextSink.
func (dts DelimitedTextSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return &dts, true
@@ -57720,12 +64371,11 @@ func (dts *DelimitedTextSink) UnmarshalJSON(body []byte) error {
switch k {
case "storeSettings":
if v != nil {
- var storeSettings StoreWriteSettings
- err = json.Unmarshal(*v, &storeSettings)
+ storeSettings, err := unmarshalBasicStoreWriteSettings(*v)
if err != nil {
return err
}
- dts.StoreSettings = &storeSettings
+ dts.StoreSettings = storeSettings
}
case "formatSettings":
if v != nil {
@@ -57822,7 +64472,7 @@ type DelimitedTextSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
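
The `DelimitedTextSink.UnmarshalJSON` hunk above now routes `storeSettings` through `unmarshalBasicStoreWriteSettings`; the generated `unmarshalBasic*` helpers all follow the same shape, peeking at the `type` discriminator before decoding the same bytes into the matching concrete struct. A standalone sketch of that pattern with hypothetical payload types (not the SDK's):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// unmarshalSettings sketches the discriminator dispatch: read "type" first,
// then unmarshal the full body into the concrete shape it names.
func unmarshalSettings(body []byte) (interface{}, error) {
	var probe struct {
		Type string `json:"type"`
	}
	if err := json.Unmarshal(body, &probe); err != nil {
		return nil, err
	}
	switch probe.Type {
	case "BlobWriteSettings": // hypothetical discriminator value
		var s struct {
			Type           string `json:"type"`
			MaxConnections int    `json:"maxConnections"` // hypothetical field
		}
		err := json.Unmarshal(body, &s)
		return s, err
	default:
		return nil, fmt.Errorf("unrecognized settings type %q", probe.Type)
	}
}

func main() {
	s, err := unmarshalSettings([]byte(`{"type":"BlobWriteSettings","maxConnections":4}`))
	fmt.Println(s, err)
}
```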
@@ -57949,6 +64599,11 @@ func (dts DelimitedTextSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for DelimitedTextSource.
+func (dts DelimitedTextSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for DelimitedTextSource.
func (dts DelimitedTextSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -58169,6 +64824,11 @@ func (dts DelimitedTextSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for DelimitedTextSource.
+func (dts DelimitedTextSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for DelimitedTextSource.
func (dts DelimitedTextSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -58244,6 +64904,11 @@ func (dts DelimitedTextSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for DelimitedTextSource.
+func (dts DelimitedTextSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for DelimitedTextSource.
func (dts DelimitedTextSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return &dts, true
@@ -58254,6 +64919,11 @@ func (dts DelimitedTextSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for DelimitedTextSource.
+func (dts DelimitedTextSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for DelimitedTextSource.
func (dts DelimitedTextSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -58559,7 +65229,7 @@ type DocumentDbCollectionDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -58695,6 +65365,11 @@ func (ddcd DocumentDbCollectionDataset) AsMarketoObjectDataset() (*MarketoObject
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for DocumentDbCollectionDataset.
+func (ddcd DocumentDbCollectionDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for DocumentDbCollectionDataset.
func (ddcd DocumentDbCollectionDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -58820,6 +65495,16 @@ func (ddcd DocumentDbCollectionDataset) AsSapCloudForCustomerResourceDataset() (
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for DocumentDbCollectionDataset.
+func (ddcd DocumentDbCollectionDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for DocumentDbCollectionDataset.
+func (ddcd DocumentDbCollectionDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for DocumentDbCollectionDataset.
func (ddcd DocumentDbCollectionDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -58860,6 +65545,16 @@ func (ddcd DocumentDbCollectionDataset) AsRelationalTableDataset() (*RelationalT
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for DocumentDbCollectionDataset.
+func (ddcd DocumentDbCollectionDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for DocumentDbCollectionDataset.
+func (ddcd DocumentDbCollectionDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for DocumentDbCollectionDataset.
func (ddcd DocumentDbCollectionDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -58975,6 +65670,11 @@ func (ddcd DocumentDbCollectionDataset) AsBinaryDataset() (*BinaryDataset, bool)
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for DocumentDbCollectionDataset.
+func (ddcd DocumentDbCollectionDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for DocumentDbCollectionDataset.
func (ddcd DocumentDbCollectionDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -58985,6 +65685,11 @@ func (ddcd DocumentDbCollectionDataset) AsParquetDataset() (*ParquetDataset, boo
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for DocumentDbCollectionDataset.
+func (ddcd DocumentDbCollectionDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for DocumentDbCollectionDataset.
func (ddcd DocumentDbCollectionDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -59132,7 +65837,7 @@ type DocumentDbCollectionSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
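
On the write side, every polymorphic sink and source in this file stamps its discriminator inside `MarshalJSON` (as the `DelimitedTextSink` hunk above does with `TypeDelimitedTextSink`), so even zero values serialize with their kind. A quick way to inspect the wire shape, assuming the same import path as above:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// The generated value-receiver MarshalJSON stamps the "type"
	// discriminator before serializing.
	b, err := json.Marshal(datafactory.DocumentDbCollectionSink{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // includes a "type" field naming the sink kind
}
```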
@@ -59290,6 +65995,11 @@ func (ddcs DocumentDbCollectionSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for DocumentDbCollectionSink.
+func (ddcs DocumentDbCollectionSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for DocumentDbCollectionSink.
func (ddcs DocumentDbCollectionSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -59305,11 +66015,21 @@ func (ddcs DocumentDbCollectionSink) AsSapCloudForCustomerSink() (*SapCloudForCu
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for DocumentDbCollectionSink.
+func (ddcs DocumentDbCollectionSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for DocumentDbCollectionSink.
func (ddcs DocumentDbCollectionSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for DocumentDbCollectionSink.
+func (ddcs DocumentDbCollectionSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for DocumentDbCollectionSink.
func (ddcs DocumentDbCollectionSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -59438,7 +66158,7 @@ type DocumentDbCollectionSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -59565,6 +66285,11 @@ func (ddcs DocumentDbCollectionSource) AsMarketoSource() (*MarketoSource, bool)
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for DocumentDbCollectionSource.
+func (ddcs DocumentDbCollectionSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for DocumentDbCollectionSource.
func (ddcs DocumentDbCollectionSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -59785,6 +66510,11 @@ func (ddcs DocumentDbCollectionSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for DocumentDbCollectionSource.
+func (ddcs DocumentDbCollectionSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for DocumentDbCollectionSource.
func (ddcs DocumentDbCollectionSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -59860,6 +66590,11 @@ func (ddcs DocumentDbCollectionSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for DocumentDbCollectionSource.
+func (ddcs DocumentDbCollectionSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for DocumentDbCollectionSource.
func (ddcs DocumentDbCollectionSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -59870,6 +66605,11 @@ func (ddcs DocumentDbCollectionSource) AsParquetSource() (*ParquetSource, bool)
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for DocumentDbCollectionSource.
+func (ddcs DocumentDbCollectionSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for DocumentDbCollectionSource.
func (ddcs DocumentDbCollectionSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -59961,6 +66701,16 @@ func (ddcs *DocumentDbCollectionSource) UnmarshalJSON(body []byte) error {
return nil
}
+// DrillDatasetTypeProperties drill Dataset Properties
+type DrillDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Table - The table name of the Drill. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Schema - The schema name of the Drill. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+}
+
// DrillLinkedService drill server linked service.
type DrillLinkedService struct {
// DrillLinkedServiceTypeProperties - Drill server linked service properties.
@@ -59975,7 +66725,7 @@ type DrillLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -60127,6 +66877,11 @@ func (dls DrillLinkedService) AsMarketoLinkedService() (*MarketoLinkedService, b
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for DrillLinkedService.
+func (dls DrillLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for DrillLinkedService.
func (dls DrillLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -60570,7 +67325,7 @@ type DrillSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -60694,6 +67449,11 @@ func (ds DrillSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for DrillSource.
+func (ds DrillSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for DrillSource.
func (ds DrillSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -60914,6 +67674,11 @@ func (ds DrillSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for DrillSource.
+func (ds DrillSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for DrillSource.
func (ds DrillSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -60989,6 +67754,11 @@ func (ds DrillSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for DrillSource.
+func (ds DrillSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for DrillSource.
func (ds DrillSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -60999,6 +67769,11 @@ func (ds DrillSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for DrillSource.
+func (ds DrillSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for DrillSource.
func (ds DrillSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -61083,8 +67858,8 @@ func (ds *DrillSource) UnmarshalJSON(body []byte) error {
// DrillTableDataset drill server dataset.
type DrillTableDataset struct {
- // GenericDatasetTypeProperties - Properties specific to this dataset type.
- *GenericDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // DrillDatasetTypeProperties - Properties specific to this dataset type.
+ *DrillDatasetTypeProperties `json:"typeProperties,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized this collection
AdditionalProperties map[string]interface{} `json:""`
// Description - Dataset description.
@@ -61101,7 +67876,7 @@ type DrillTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
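
`DrillTableDataset` now carries its own `DrillDatasetTypeProperties` (schema + table, with `tableName` retained only for backward compatibility) instead of the generic property bag, which is why its `MarshalJSON` and `UnmarshalJSON` are regenerated below. A minimal construction sketch:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// Populate the Drill-specific shape introduced in this hunk; tableName
	// remains only for back-compat, per the DrillDatasetTypeProperties
	// comments above.
	ds := datafactory.DrillTableDataset{
		DrillDatasetTypeProperties: &datafactory.DrillDatasetTypeProperties{
			Schema: "dfs",       // string or Expression with resultType string
			Table:  "customers", // string or Expression with resultType string
		},
	}
	b, _ := ds.MarshalJSON()
	fmt.Println(string(b))
}
```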
@@ -61109,8 +67884,8 @@ type DrillTableDataset struct {
func (dtd DrillTableDataset) MarshalJSON() ([]byte, error) {
dtd.Type = TypeDrillTable
objectMap := make(map[string]interface{})
- if dtd.GenericDatasetTypeProperties != nil {
- objectMap["typeProperties"] = dtd.GenericDatasetTypeProperties
+ if dtd.DrillDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = dtd.DrillDatasetTypeProperties
}
if dtd.Description != nil {
objectMap["description"] = dtd.Description
@@ -61237,6 +68012,11 @@ func (dtd DrillTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bo
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for DrillTableDataset.
+func (dtd DrillTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for DrillTableDataset.
func (dtd DrillTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -61362,6 +68142,16 @@ func (dtd DrillTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudFo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for DrillTableDataset.
+func (dtd DrillTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for DrillTableDataset.
+func (dtd DrillTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for DrillTableDataset.
func (dtd DrillTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -61402,6 +68192,16 @@ func (dtd DrillTableDataset) AsRelationalTableDataset() (*RelationalTableDataset
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for DrillTableDataset.
+func (dtd DrillTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for DrillTableDataset.
+func (dtd DrillTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for DrillTableDataset.
func (dtd DrillTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -61517,6 +68317,11 @@ func (dtd DrillTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for DrillTableDataset.
+func (dtd DrillTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for DrillTableDataset.
func (dtd DrillTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -61527,6 +68332,11 @@ func (dtd DrillTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for DrillTableDataset.
+func (dtd DrillTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for DrillTableDataset.
func (dtd DrillTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -61553,12 +68363,12 @@ func (dtd *DrillTableDataset) UnmarshalJSON(body []byte) error {
switch k {
case "typeProperties":
if v != nil {
- var genericDatasetTypeProperties GenericDatasetTypeProperties
- err = json.Unmarshal(*v, &genericDatasetTypeProperties)
+ var drillDatasetTypeProperties DrillDatasetTypeProperties
+ err = json.Unmarshal(*v, &drillDatasetTypeProperties)
if err != nil {
return err
}
- dtd.GenericDatasetTypeProperties = &genericDatasetTypeProperties
+ dtd.DrillDatasetTypeProperties = &drillDatasetTypeProperties
}
default:
if v != nil {
@@ -61664,7 +68474,7 @@ type DynamicsAXLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -61816,6 +68626,11 @@ func (dals DynamicsAXLinkedService) AsMarketoLinkedService() (*MarketoLinkedServ
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for DynamicsAXLinkedService.
+func (dals DynamicsAXLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for DynamicsAXLinkedService.
func (dals DynamicsAXLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -62341,7 +69156,7 @@ type DynamicsAXResourceDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
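
The As* methods being added in these hunks follow the generated discriminated-union pattern: every concrete dataset type satisfies the BasicDataset interface, and each AsXxxDataset helper returns a non-nil value and true only when the receiver really is that concrete type. Below is a minimal consumer-side sketch of the newly added helpers, assuming the 2018-06-01 datafactory import path; only the AsAvroDataset, AsJSONDataset and AsAzureMariaDBTableDataset signatures are taken from the diff itself, everything else (function names, zero-value construction) is illustrative.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// describeDataset reports which of the dataset kinds added in this release a
// BasicDataset value is, using only the generated As* helpers shown above.
func describeDataset(d datafactory.BasicDataset) string {
	if _, ok := d.AsAvroDataset(); ok {
		return "avro"
	}
	if _, ok := d.AsJSONDataset(); ok {
		return "json"
	}
	if _, ok := d.AsAzureMariaDBTableDataset(); ok {
		return "azure-mariadb-table"
	}
	return "pre-existing dataset type"
}

func main() {
	// Zero-value structs are enough to exercise the helpers.
	fmt.Println(describeDataset(datafactory.JSONDataset{}))               // json
	fmt.Println(describeDataset(datafactory.DynamicsAXResourceDataset{})) // pre-existing dataset type
}
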
@@ -62477,6 +69292,11 @@ func (dard DynamicsAXResourceDataset) AsMarketoObjectDataset() (*MarketoObjectDa
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for DynamicsAXResourceDataset.
+func (dard DynamicsAXResourceDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for DynamicsAXResourceDataset.
func (dard DynamicsAXResourceDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -62602,6 +69422,16 @@ func (dard DynamicsAXResourceDataset) AsSapCloudForCustomerResourceDataset() (*S
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for DynamicsAXResourceDataset.
+func (dard DynamicsAXResourceDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for DynamicsAXResourceDataset.
+func (dard DynamicsAXResourceDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for DynamicsAXResourceDataset.
func (dard DynamicsAXResourceDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -62642,6 +69472,16 @@ func (dard DynamicsAXResourceDataset) AsRelationalTableDataset() (*RelationalTab
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for DynamicsAXResourceDataset.
+func (dard DynamicsAXResourceDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for DynamicsAXResourceDataset.
+func (dard DynamicsAXResourceDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for DynamicsAXResourceDataset.
func (dard DynamicsAXResourceDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -62757,6 +69597,11 @@ func (dard DynamicsAXResourceDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for DynamicsAXResourceDataset.
+func (dard DynamicsAXResourceDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for DynamicsAXResourceDataset.
func (dard DynamicsAXResourceDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -62767,6 +69612,11 @@ func (dard DynamicsAXResourceDataset) AsParquetDataset() (*ParquetDataset, bool)
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for DynamicsAXResourceDataset.
+func (dard DynamicsAXResourceDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for DynamicsAXResourceDataset.
func (dard DynamicsAXResourceDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -62908,7 +69758,7 @@ type DynamicsAXSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -63032,6 +69882,11 @@ func (das DynamicsAXSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for DynamicsAXSource.
+func (das DynamicsAXSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for DynamicsAXSource.
func (das DynamicsAXSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -63252,6 +70107,11 @@ func (das DynamicsAXSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for DynamicsAXSource.
+func (das DynamicsAXSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for DynamicsAXSource.
func (das DynamicsAXSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -63327,6 +70187,11 @@ func (das DynamicsAXSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for DynamicsAXSource.
+func (das DynamicsAXSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for DynamicsAXSource.
func (das DynamicsAXSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -63337,6 +70202,11 @@ func (das DynamicsAXSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for DynamicsAXSource.
+func (das DynamicsAXSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for DynamicsAXSource.
func (das DynamicsAXSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -63439,7 +70309,7 @@ type DynamicsCrmEntityDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -63575,6 +70445,11 @@ func (dced DynamicsCrmEntityDataset) AsMarketoObjectDataset() (*MarketoObjectDat
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
+func (dced DynamicsCrmEntityDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
func (dced DynamicsCrmEntityDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -63700,6 +70575,16 @@ func (dced DynamicsCrmEntityDataset) AsSapCloudForCustomerResourceDataset() (*Sa
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
+func (dced DynamicsCrmEntityDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
+func (dced DynamicsCrmEntityDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
func (dced DynamicsCrmEntityDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -63740,6 +70625,16 @@ func (dced DynamicsCrmEntityDataset) AsRelationalTableDataset() (*RelationalTabl
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
+func (dced DynamicsCrmEntityDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
+func (dced DynamicsCrmEntityDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
func (dced DynamicsCrmEntityDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -63855,6 +70750,11 @@ func (dced DynamicsCrmEntityDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
+func (dced DynamicsCrmEntityDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
func (dced DynamicsCrmEntityDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -63865,6 +70765,11 @@ func (dced DynamicsCrmEntityDataset) AsParquetDataset() (*ParquetDataset, bool)
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
+func (dced DynamicsCrmEntityDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for DynamicsCrmEntityDataset.
func (dced DynamicsCrmEntityDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -64008,7 +70913,7 @@ type DynamicsCrmLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -64160,6 +71065,11 @@ func (dcls DynamicsCrmLinkedService) AsMarketoLinkedService() (*MarketoLinkedSer
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for DynamicsCrmLinkedService.
+func (dcls DynamicsCrmLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for DynamicsCrmLinkedService.
func (dcls DynamicsCrmLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -64716,7 +71626,7 @@ type DynamicsCrmSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
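
The copy-sink hierarchy gets the same treatment: BasicCopySink picks up AvroSink, AzureMySQLSink and JSONSink alternatives, each backed by a stub As* implementation on every existing sink type, as in the hunks that follow. A companion sketch under the same assumptions as the dataset example above (import path and helper names other than the As* methods are illustrative):

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// sinkKind distinguishes the copy-sink types added in this release from the
// pre-existing ones, using the generated As* helpers on BasicCopySink.
func sinkKind(s datafactory.BasicCopySink) string {
	if _, ok := s.AsAvroSink(); ok {
		return "avro"
	}
	if _, ok := s.AsJSONSink(); ok {
		return "json"
	}
	if _, ok := s.AsAzureMySQLSink(); ok {
		return "azure-mysql"
	}
	return "pre-existing sink type"
}

func main() {
	fmt.Println(sinkKind(datafactory.AvroSink{}))        // avro
	fmt.Println(sinkKind(datafactory.DynamicsCrmSink{})) // pre-existing sink type
}
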
@@ -64874,6 +71784,11 @@ func (dcs DynamicsCrmSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for DynamicsCrmSink.
+func (dcs DynamicsCrmSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for DynamicsCrmSink.
func (dcs DynamicsCrmSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -64889,11 +71804,21 @@ func (dcs DynamicsCrmSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for DynamicsCrmSink.
+func (dcs DynamicsCrmSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for DynamicsCrmSink.
func (dcs DynamicsCrmSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for DynamicsCrmSink.
+func (dcs DynamicsCrmSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for DynamicsCrmSink.
func (dcs DynamicsCrmSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -65020,7 +71945,7 @@ type DynamicsCrmSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -65144,6 +72069,11 @@ func (dcs DynamicsCrmSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for DynamicsCrmSource.
+func (dcs DynamicsCrmSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for DynamicsCrmSource.
func (dcs DynamicsCrmSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -65364,6 +72294,11 @@ func (dcs DynamicsCrmSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for DynamicsCrmSource.
+func (dcs DynamicsCrmSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for DynamicsCrmSource.
func (dcs DynamicsCrmSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -65439,6 +72374,11 @@ func (dcs DynamicsCrmSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for DynamicsCrmSource.
+func (dcs DynamicsCrmSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for DynamicsCrmSource.
func (dcs DynamicsCrmSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -65449,6 +72389,11 @@ func (dcs DynamicsCrmSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for DynamicsCrmSource.
+func (dcs DynamicsCrmSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for DynamicsCrmSource.
func (dcs DynamicsCrmSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -65551,7 +72496,7 @@ type DynamicsEntityDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -65687,6 +72632,11 @@ func (ded DynamicsEntityDataset) AsMarketoObjectDataset() (*MarketoObjectDataset
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for DynamicsEntityDataset.
+func (ded DynamicsEntityDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for DynamicsEntityDataset.
func (ded DynamicsEntityDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -65812,6 +72762,16 @@ func (ded DynamicsEntityDataset) AsSapCloudForCustomerResourceDataset() (*SapClo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for DynamicsEntityDataset.
+func (ded DynamicsEntityDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for DynamicsEntityDataset.
+func (ded DynamicsEntityDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for DynamicsEntityDataset.
func (ded DynamicsEntityDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -65852,6 +72812,16 @@ func (ded DynamicsEntityDataset) AsRelationalTableDataset() (*RelationalTableDat
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for DynamicsEntityDataset.
+func (ded DynamicsEntityDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for DynamicsEntityDataset.
+func (ded DynamicsEntityDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for DynamicsEntityDataset.
func (ded DynamicsEntityDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -65967,6 +72937,11 @@ func (ded DynamicsEntityDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for DynamicsEntityDataset.
+func (ded DynamicsEntityDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for DynamicsEntityDataset.
func (ded DynamicsEntityDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -65977,6 +72952,11 @@ func (ded DynamicsEntityDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for DynamicsEntityDataset.
+func (ded DynamicsEntityDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for DynamicsEntityDataset.
func (ded DynamicsEntityDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -66120,7 +73100,7 @@ type DynamicsLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -66272,6 +73252,11 @@ func (dls DynamicsLinkedService) AsMarketoLinkedService() (*MarketoLinkedService
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for DynamicsLinkedService.
+func (dls DynamicsLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for DynamicsLinkedService.
func (dls DynamicsLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -66828,7 +73813,7 @@ type DynamicsSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -66986,6 +73971,11 @@ func (ds DynamicsSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for DynamicsSink.
+func (ds DynamicsSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for DynamicsSink.
func (ds DynamicsSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -67001,11 +73991,21 @@ func (ds DynamicsSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bo
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for DynamicsSink.
+func (ds DynamicsSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for DynamicsSink.
func (ds DynamicsSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for DynamicsSink.
+func (ds DynamicsSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for DynamicsSink.
func (ds DynamicsSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -67132,7 +74132,7 @@ type DynamicsSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
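
The copy-source side mirrors both sketches above: BasicCopySource gains AzureMariaDBSource, SapBwSource, JSONSource and AvroSource alternatives, again with a stub As* implementation on every existing source. One more short sketch under the same assumptions, with only the As* signatures taken from the diff:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// sourceKind covers the copy-source types added in this release via the
// generated As* helpers on BasicCopySource.
func sourceKind(s datafactory.BasicCopySource) string {
	if _, ok := s.AsSapBwSource(); ok {
		return "sap-bw"
	}
	if _, ok := s.AsAzureMariaDBSource(); ok {
		return "azure-mariadb"
	}
	if _, ok := s.AsJSONSource(); ok {
		return "json"
	}
	if _, ok := s.AsAvroSource(); ok {
		return "avro"
	}
	return "pre-existing source type"
}

func main() {
	fmt.Println(sourceKind(datafactory.AvroSource{}))     // avro
	fmt.Println(sourceKind(datafactory.DynamicsSource{})) // pre-existing source type
}
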
@@ -67256,6 +74256,11 @@ func (ds DynamicsSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for DynamicsSource.
+func (ds DynamicsSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for DynamicsSource.
func (ds DynamicsSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -67476,6 +74481,11 @@ func (ds DynamicsSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for DynamicsSource.
+func (ds DynamicsSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for DynamicsSource.
func (ds DynamicsSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -67551,6 +74561,11 @@ func (ds DynamicsSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for DynamicsSource.
+func (ds DynamicsSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for DynamicsSource.
func (ds DynamicsSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -67561,6 +74576,11 @@ func (ds DynamicsSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for DynamicsSource.
+func (ds DynamicsSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for DynamicsSource.
func (ds DynamicsSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -67657,7 +74677,7 @@ type EloquaLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -67809,6 +74829,11 @@ func (els EloquaLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for EloquaLinkedService.
+func (els EloquaLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for EloquaLinkedService.
func (els EloquaLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -68345,7 +75370,7 @@ type EloquaObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
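The TypeBasicDataset discriminator above gains new members in this release (TypeAzureMariaDBTable, TypeSapBwCube, TypeSybaseTable, TypeDb2Table, TypeAmazonRedshiftTable, TypeJSON, TypeAvro), and the hunks that follow add the matching As* conversion helpers to every existing dataset type, each returning (nil, false). Below is a minimal sketch of dispatching on the new subtypes; the import path is assumed to be the updated datafactory 2018-06-01 package, and the helper function and sample values are hypothetical.

```go
package main

import (
	"fmt"

	// Assumed import path for the updated datafactory 2018-06-01 package.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// datasetKind dispatches on a few of the newly added BasicDataset subtypes
// using the (value, ok) helpers; every non-matching subtype answers (nil, false).
func datasetKind(d datafactory.BasicDataset) string {
	if _, ok := d.AsJSONDataset(); ok {
		return "json"
	}
	if _, ok := d.AsAvroDataset(); ok {
		return "avro"
	}
	if _, ok := d.AsAzureMariaDBTableDataset(); ok {
		return "azure-mariadb-table"
	}
	return "other"
}

func main() {
	// A zero-value JSONDataset stands in for a dataset normally retrieved
	// from the service (for example via DatasetResource.Properties).
	var d datafactory.BasicDataset = datafactory.JSONDataset{}
	fmt.Println(datasetKind(d)) // json
}
```

The same (value, ok) convention applies to every other As* helper added throughout this diff.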
@@ -68481,6 +75506,11 @@ func (eod EloquaObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for EloquaObjectDataset.
+func (eod EloquaObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for EloquaObjectDataset.
func (eod EloquaObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -68606,6 +75636,16 @@ func (eod EloquaObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloud
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for EloquaObjectDataset.
+func (eod EloquaObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for EloquaObjectDataset.
+func (eod EloquaObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for EloquaObjectDataset.
func (eod EloquaObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -68646,6 +75686,16 @@ func (eod EloquaObjectDataset) AsRelationalTableDataset() (*RelationalTableDatas
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for EloquaObjectDataset.
+func (eod EloquaObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for EloquaObjectDataset.
+func (eod EloquaObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for EloquaObjectDataset.
func (eod EloquaObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -68761,6 +75811,11 @@ func (eod EloquaObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for EloquaObjectDataset.
+func (eod EloquaObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for EloquaObjectDataset.
func (eod EloquaObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -68771,6 +75826,11 @@ func (eod EloquaObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for EloquaObjectDataset.
+func (eod EloquaObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for EloquaObjectDataset.
func (eod EloquaObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -68906,7 +75966,7 @@ type EloquaSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
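The copy-source discriminator picks up parallel additions (TypeAzureMariaDBSource, TypeSapBwSource, TypeJSONSource, TypeAvroSource), so every BasicCopySource implementation below gains the corresponding helpers. A minimal sketch of probing a source for the new file formats follows, under the same assumed import path as the other sketches in this section; the function and sample values are hypothetical.

```go
package main

import (
	"fmt"

	// Assumed import path for the updated datafactory 2018-06-01 package.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// isFileFormatSource reports whether a copy source reads one of the
// file-format sources added in this release (JSON or Avro).
func isFileFormatSource(src datafactory.BasicCopySource) bool {
	if _, ok := src.AsJSONSource(); ok {
		return true
	}
	if _, ok := src.AsAvroSource(); ok {
		return true
	}
	return false
}

func main() {
	// Zero-value sources stand in for values normally taken from a copy
	// activity definition.
	fmt.Println(isFileFormatSource(datafactory.AvroSource{}))   // true
	fmt.Println(isFileFormatSource(datafactory.EloquaSource{})) // false
}
```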
@@ -69030,6 +76090,11 @@ func (es EloquaSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for EloquaSource.
+func (es EloquaSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for EloquaSource.
func (es EloquaSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -69250,6 +76315,11 @@ func (es EloquaSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for EloquaSource.
+func (es EloquaSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for EloquaSource.
func (es EloquaSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -69325,6 +76395,11 @@ func (es EloquaSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for EloquaSource.
+func (es EloquaSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for EloquaSource.
func (es EloquaSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -69335,6 +76410,11 @@ func (es EloquaSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for EloquaSource.
+func (es EloquaSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for EloquaSource.
func (es EloquaSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -71347,7 +78427,7 @@ type FileServerLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -71499,6 +78579,11 @@ func (fsls FileServerLinkedService) AsMarketoLinkedService() (*MarketoLinkedServ
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for FileServerLinkedService.
+func (fsls FileServerLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for FileServerLinkedService.
func (fsls FileServerLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -72224,32 +79309,63 @@ func (fsrs *FileServerReadSettings) UnmarshalJSON(body []byte) error {
type FileServerWriteSettings struct {
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
- // Type - The write setting type.
- Type *string `json:"type,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
// CopyBehavior - The type of copy behavior for copy sink.
CopyBehavior interface{} `json:"copyBehavior,omitempty"`
+ // Type - Possible values include: 'TypeStoreWriteSettings', 'TypeFileServerWriteSettings', 'TypeAzureDataLakeStoreWriteSettings', 'TypeAzureBlobFSWriteSettings', 'TypeAzureBlobStorageWriteSettings'
+ Type TypeBasicStoreWriteSettings `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for FileServerWriteSettings.
func (fsws FileServerWriteSettings) MarshalJSON() ([]byte, error) {
+ fsws.Type = TypeFileServerWriteSettings
objectMap := make(map[string]interface{})
- if fsws.Type != nil {
- objectMap["type"] = fsws.Type
- }
if fsws.MaxConcurrentConnections != nil {
objectMap["maxConcurrentConnections"] = fsws.MaxConcurrentConnections
}
if fsws.CopyBehavior != nil {
objectMap["copyBehavior"] = fsws.CopyBehavior
}
+ if fsws.Type != "" {
+ objectMap["type"] = fsws.Type
+ }
for k, v := range fsws.AdditionalProperties {
objectMap[k] = v
}
return json.Marshal(objectMap)
}
+// AsFileServerWriteSettings is the BasicStoreWriteSettings implementation for FileServerWriteSettings.
+func (fsws FileServerWriteSettings) AsFileServerWriteSettings() (*FileServerWriteSettings, bool) {
+ return &fsws, true
+}
+
+// AsAzureDataLakeStoreWriteSettings is the BasicStoreWriteSettings implementation for FileServerWriteSettings.
+func (fsws FileServerWriteSettings) AsAzureDataLakeStoreWriteSettings() (*AzureDataLakeStoreWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSWriteSettings is the BasicStoreWriteSettings implementation for FileServerWriteSettings.
+func (fsws FileServerWriteSettings) AsAzureBlobFSWriteSettings() (*AzureBlobFSWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureBlobStorageWriteSettings is the BasicStoreWriteSettings implementation for FileServerWriteSettings.
+func (fsws FileServerWriteSettings) AsAzureBlobStorageWriteSettings() (*AzureBlobStorageWriteSettings, bool) {
+ return nil, false
+}
+
+// AsStoreWriteSettings is the BasicStoreWriteSettings implementation for FileServerWriteSettings.
+func (fsws FileServerWriteSettings) AsStoreWriteSettings() (*StoreWriteSettings, bool) {
+ return nil, false
+}
+
+// AsBasicStoreWriteSettings is the BasicStoreWriteSettings implementation for FileServerWriteSettings.
+func (fsws FileServerWriteSettings) AsBasicStoreWriteSettings() (BasicStoreWriteSettings, bool) {
+ return &fsws, true
+}
+
// UnmarshalJSON is the custom unmarshaler for FileServerWriteSettings struct.
func (fsws *FileServerWriteSettings) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
@@ -72271,15 +79387,6 @@ func (fsws *FileServerWriteSettings) UnmarshalJSON(body []byte) error {
}
fsws.AdditionalProperties[k] = additionalProperties
}
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- fsws.Type = &typeVar
- }
case "maxConcurrentConnections":
if v != nil {
var maxConcurrentConnections interface{}
@@ -72298,6 +79405,15 @@ func (fsws *FileServerWriteSettings) UnmarshalJSON(body []byte) error {
}
fsws.CopyBehavior = copyBehavior
}
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicStoreWriteSettings
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fsws.Type = typeVar
+ }
}
}
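FileServerWriteSettings now participates in the BasicStoreWriteSettings polymorphic hierarchy: Type becomes a TypeBasicStoreWriteSettings discriminator that MarshalJSON stamps with TypeFileServerWriteSettings, and the As* helpers above provide the usual conversions. A minimal sketch of the resulting serialization, assuming the same import path as the earlier sketches; the field values are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path for the updated datafactory 2018-06-01 package.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// Callers no longer set Type themselves; MarshalJSON stamps the
	// TypeFileServerWriteSettings discriminator before serializing.
	ws := datafactory.FileServerWriteSettings{
		CopyBehavior:             "PreserveHierarchy",
		MaxConcurrentConnections: 4,
	}
	b, err := json.Marshal(ws)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	// Roughly: {"copyBehavior":"PreserveHierarchy","maxConcurrentConnections":4,"type":"FileServerWriteSettings"}
	fmt.Println(string(b))
}
```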
@@ -72324,7 +79440,7 @@ type FileShareDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -72460,6 +79576,11 @@ func (fsd FileShareDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, boo
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for FileShareDataset.
+func (fsd FileShareDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for FileShareDataset.
func (fsd FileShareDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -72585,6 +79706,16 @@ func (fsd FileShareDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudFor
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for FileShareDataset.
+func (fsd FileShareDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for FileShareDataset.
+func (fsd FileShareDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for FileShareDataset.
func (fsd FileShareDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -72625,6 +79756,16 @@ func (fsd FileShareDataset) AsRelationalTableDataset() (*RelationalTableDataset,
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for FileShareDataset.
+func (fsd FileShareDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for FileShareDataset.
+func (fsd FileShareDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for FileShareDataset.
func (fsd FileShareDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -72740,6 +79881,11 @@ func (fsd FileShareDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for FileShareDataset.
+func (fsd FileShareDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for FileShareDataset.
func (fsd FileShareDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -72750,6 +79896,11 @@ func (fsd FileShareDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for FileShareDataset.
+func (fsd FileShareDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for FileShareDataset.
func (fsd FileShareDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -72983,7 +80134,7 @@ type FileSystemSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -73138,6 +80289,11 @@ func (fss FileSystemSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for FileSystemSink.
+func (fss FileSystemSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for FileSystemSink.
func (fss FileSystemSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -73153,11 +80309,21 @@ func (fss FileSystemSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink,
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for FileSystemSink.
+func (fss FileSystemSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for FileSystemSink.
func (fss FileSystemSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for FileSystemSink.
+func (fss FileSystemSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for FileSystemSink.
func (fss FileSystemSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -73275,7 +80441,7 @@ type FileSystemSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -73399,6 +80565,11 @@ func (fss FileSystemSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for FileSystemSource.
+func (fss FileSystemSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for FileSystemSource.
func (fss FileSystemSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -73619,6 +80790,11 @@ func (fss FileSystemSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for FileSystemSource.
+func (fss FileSystemSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for FileSystemSource.
func (fss FileSystemSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -73694,6 +80870,11 @@ func (fss FileSystemSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for FileSystemSource.
+func (fss FileSystemSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for FileSystemSource.
func (fss FileSystemSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -73704,6 +80885,11 @@ func (fss FileSystemSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for FileSystemSource.
+func (fss FileSystemSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for FileSystemSource.
func (fss FileSystemSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -74732,7 +81918,7 @@ type FtpServerLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -74884,6 +82070,11 @@ func (fsls FtpServerLinkedService) AsMarketoLinkedService() (*MarketoLinkedServi
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for FtpServerLinkedService.
+func (fsls FtpServerLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for FtpServerLinkedService.
func (fsls FtpServerLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -75886,7 +83077,7 @@ type GoogleAdWordsLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -76038,6 +83229,11 @@ func (gawls GoogleAdWordsLinkedService) AsMarketoLinkedService() (*MarketoLinked
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for GoogleAdWordsLinkedService.
+func (gawls GoogleAdWordsLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for GoogleAdWordsLinkedService.
func (gawls GoogleAdWordsLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -76615,7 +83811,7 @@ type GoogleAdWordsObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -76751,6 +83947,11 @@ func (gawod GoogleAdWordsObjectDataset) AsMarketoObjectDataset() (*MarketoObject
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
+func (gawod GoogleAdWordsObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
func (gawod GoogleAdWordsObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -76876,6 +84077,16 @@ func (gawod GoogleAdWordsObjectDataset) AsSapCloudForCustomerResourceDataset() (
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
+func (gawod GoogleAdWordsObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
+func (gawod GoogleAdWordsObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
func (gawod GoogleAdWordsObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -76916,6 +84127,16 @@ func (gawod GoogleAdWordsObjectDataset) AsRelationalTableDataset() (*RelationalT
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
+func (gawod GoogleAdWordsObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
+func (gawod GoogleAdWordsObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
func (gawod GoogleAdWordsObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -77031,6 +84252,11 @@ func (gawod GoogleAdWordsObjectDataset) AsBinaryDataset() (*BinaryDataset, bool)
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
+func (gawod GoogleAdWordsObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
func (gawod GoogleAdWordsObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -77041,6 +84267,11 @@ func (gawod GoogleAdWordsObjectDataset) AsParquetDataset() (*ParquetDataset, boo
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
+func (gawod GoogleAdWordsObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for GoogleAdWordsObjectDataset.
func (gawod GoogleAdWordsObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -77176,7 +84407,7 @@ type GoogleAdWordsSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -77300,6 +84531,11 @@ func (gaws GoogleAdWordsSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for GoogleAdWordsSource.
+func (gaws GoogleAdWordsSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for GoogleAdWordsSource.
func (gaws GoogleAdWordsSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -77520,6 +84756,11 @@ func (gaws GoogleAdWordsSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for GoogleAdWordsSource.
+func (gaws GoogleAdWordsSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for GoogleAdWordsSource.
func (gaws GoogleAdWordsSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -77595,6 +84836,11 @@ func (gaws GoogleAdWordsSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for GoogleAdWordsSource.
+func (gaws GoogleAdWordsSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for GoogleAdWordsSource.
func (gaws GoogleAdWordsSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -77605,6 +84851,11 @@ func (gaws GoogleAdWordsSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for GoogleAdWordsSource.
+func (gaws GoogleAdWordsSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for GoogleAdWordsSource.
func (gaws GoogleAdWordsSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -77687,6 +84938,16 @@ func (gaws *GoogleAdWordsSource) UnmarshalJSON(body []byte) error {
return nil
}
+// GoogleBigQueryDatasetTypeProperties google BigQuery dataset type properties.
+type GoogleBigQueryDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using database + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Table - The table name in the Google BigQuery dataset. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Dataset - The database (dataset) name in Google BigQuery. Type: string (or Expression with resultType string).
+ Dataset interface{} `json:"dataset,omitempty"`
+}
+
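GoogleBigQueryObjectDataset switches from the generic type properties to this dedicated struct (see the change further down this diff), splitting the retiring tableName into separate dataset and table values. A minimal sketch of populating the new shape, again under the assumed import path; the dataset and table names are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path for the updated datafactory 2018-06-01 package.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// Prefer the new Dataset + Table pair over the retiring TableName.
	props := datafactory.GoogleBigQueryDatasetTypeProperties{
		Dataset: "analytics",    // hypothetical BigQuery dataset (database) name
		Table:   "daily_visits", // hypothetical table within that dataset
	}
	bq := datafactory.GoogleBigQueryObjectDataset{
		GoogleBigQueryDatasetTypeProperties: &props,
	}
	b, err := json.Marshal(bq)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	// typeProperties carries dataset/table; the type discriminator is stamped by MarshalJSON.
	fmt.Println(string(b))
}
```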
// GoogleBigQueryLinkedService google BigQuery service linked service.
type GoogleBigQueryLinkedService struct {
// GoogleBigQueryLinkedServiceTypeProperties - Google BigQuery service linked service properties.
@@ -77701,7 +84962,7 @@ type GoogleBigQueryLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -77853,6 +85114,11 @@ func (gbqls GoogleBigQueryLinkedService) AsMarketoLinkedService() (*MarketoLinke
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for GoogleBigQueryLinkedService.
+func (gbqls GoogleBigQueryLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for GoogleBigQueryLinkedService.
func (gbqls GoogleBigQueryLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -78424,8 +85690,8 @@ func (gbqlstp *GoogleBigQueryLinkedServiceTypeProperties) UnmarshalJSON(body []b
// GoogleBigQueryObjectDataset google BigQuery service dataset.
type GoogleBigQueryObjectDataset struct {
- // GenericDatasetTypeProperties - Properties specific to this dataset type.
- *GenericDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // GoogleBigQueryDatasetTypeProperties - Properties specific to this dataset type.
+ *GoogleBigQueryDatasetTypeProperties `json:"typeProperties,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// Description - Dataset description.
@@ -78442,7 +85708,7 @@ type GoogleBigQueryObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -78450,8 +85716,8 @@ type GoogleBigQueryObjectDataset struct {
func (gbqod GoogleBigQueryObjectDataset) MarshalJSON() ([]byte, error) {
gbqod.Type = TypeGoogleBigQueryObject
objectMap := make(map[string]interface{})
- if gbqod.GenericDatasetTypeProperties != nil {
- objectMap["typeProperties"] = gbqod.GenericDatasetTypeProperties
+ if gbqod.GoogleBigQueryDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = gbqod.GoogleBigQueryDatasetTypeProperties
}
if gbqod.Description != nil {
objectMap["description"] = gbqod.Description
@@ -78578,6 +85844,11 @@ func (gbqod GoogleBigQueryObjectDataset) AsMarketoObjectDataset() (*MarketoObjec
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
+func (gbqod GoogleBigQueryObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
func (gbqod GoogleBigQueryObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -78703,6 +85974,16 @@ func (gbqod GoogleBigQueryObjectDataset) AsSapCloudForCustomerResourceDataset()
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
+func (gbqod GoogleBigQueryObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
+func (gbqod GoogleBigQueryObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
func (gbqod GoogleBigQueryObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -78743,6 +86024,16 @@ func (gbqod GoogleBigQueryObjectDataset) AsRelationalTableDataset() (*Relational
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
+func (gbqod GoogleBigQueryObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
+func (gbqod GoogleBigQueryObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
func (gbqod GoogleBigQueryObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -78858,6 +86149,11 @@ func (gbqod GoogleBigQueryObjectDataset) AsBinaryDataset() (*BinaryDataset, bool
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
+func (gbqod GoogleBigQueryObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
func (gbqod GoogleBigQueryObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -78868,6 +86164,11 @@ func (gbqod GoogleBigQueryObjectDataset) AsParquetDataset() (*ParquetDataset, bo
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
+func (gbqod GoogleBigQueryObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for GoogleBigQueryObjectDataset.
func (gbqod GoogleBigQueryObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -78894,12 +86195,12 @@ func (gbqod *GoogleBigQueryObjectDataset) UnmarshalJSON(body []byte) error {
switch k {
case "typeProperties":
if v != nil {
- var genericDatasetTypeProperties GenericDatasetTypeProperties
- err = json.Unmarshal(*v, &genericDatasetTypeProperties)
+ var googleBigQueryDatasetTypeProperties GoogleBigQueryDatasetTypeProperties
+ err = json.Unmarshal(*v, &googleBigQueryDatasetTypeProperties)
if err != nil {
return err
}
- gbqod.GenericDatasetTypeProperties = &genericDatasetTypeProperties
+ gbqod.GoogleBigQueryDatasetTypeProperties = &googleBigQueryDatasetTypeProperties
}
default:
if v != nil {
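
A hedged round-trip sketch of the rewired UnmarshalJSON above: the "typeProperties" object now lands in GoogleBigQueryDatasetTypeProperties rather than the old generic struct. The payload and import path are illustrative assumptions.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// Illustrative payload; the custom UnmarshalJSON routes "typeProperties"
	// into the new GoogleBigQueryDatasetTypeProperties struct.
	payload := []byte(`{
		"type": "GoogleBigQueryObject",
		"typeProperties": {"dataset": "sales", "table": "orders"}
	}`)

	var ds datafactory.GoogleBigQueryObjectDataset
	if err := json.Unmarshal(payload, &ds); err != nil {
		panic(err)
	}
	fmt.Println(ds.GoogleBigQueryDatasetTypeProperties.Dataset, ds.GoogleBigQueryDatasetTypeProperties.Table)
	// sales orders
}
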
@@ -79003,7 +86304,7 @@ type GoogleBigQuerySource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -79127,6 +86428,11 @@ func (gbqs GoogleBigQuerySource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for GoogleBigQuerySource.
+func (gbqs GoogleBigQuerySource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for GoogleBigQuerySource.
func (gbqs GoogleBigQuerySource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -79347,6 +86653,11 @@ func (gbqs GoogleBigQuerySource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for GoogleBigQuerySource.
+func (gbqs GoogleBigQuerySource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for GoogleBigQuerySource.
func (gbqs GoogleBigQuerySource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -79422,6 +86733,11 @@ func (gbqs GoogleBigQuerySource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for GoogleBigQuerySource.
+func (gbqs GoogleBigQuerySource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for GoogleBigQuerySource.
func (gbqs GoogleBigQuerySource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -79432,6 +86748,11 @@ func (gbqs GoogleBigQuerySource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for GoogleBigQuerySource.
+func (gbqs GoogleBigQuerySource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for GoogleBigQuerySource.
func (gbqs GoogleBigQuerySource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -79514,6 +86835,16 @@ func (gbqs *GoogleBigQuerySource) UnmarshalJSON(body []byte) error {
return nil
}
+// GreenplumDatasetTypeProperties greenplum Dataset Properties
+type GreenplumDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Table - The table name in Greenplum. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Schema - The schema name in Greenplum. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+}
+
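
As with the BigQuery change above, a tiny compile-check sketch (values illustrative): schema and table replace the retired tableName.

package main

import "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"

func main() {
	// Illustrative values; schema + table replace the retired tableName.
	_ = datafactory.GreenplumDatasetTypeProperties{
		Schema: "public",
		Table:  "events",
	}
}
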
// GreenplumLinkedService greenplum Database linked service.
type GreenplumLinkedService struct {
// GreenplumLinkedServiceTypeProperties - Greenplum Database linked service properties.
@@ -79528,7 +86859,7 @@ type GreenplumLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -79680,6 +87011,11 @@ func (gls GreenplumLinkedService) AsMarketoLinkedService() (*MarketoLinkedServic
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for GreenplumLinkedService.
+func (gls GreenplumLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for GreenplumLinkedService.
func (gls GreenplumLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -80123,7 +87459,7 @@ type GreenplumSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -80247,6 +87583,11 @@ func (gs GreenplumSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for GreenplumSource.
+func (gs GreenplumSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for GreenplumSource.
func (gs GreenplumSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -80467,6 +87808,11 @@ func (gs GreenplumSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for GreenplumSource.
+func (gs GreenplumSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for GreenplumSource.
func (gs GreenplumSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -80542,6 +87888,11 @@ func (gs GreenplumSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for GreenplumSource.
+func (gs GreenplumSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for GreenplumSource.
func (gs GreenplumSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -80552,6 +87903,11 @@ func (gs GreenplumSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for GreenplumSource.
+func (gs GreenplumSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for GreenplumSource.
func (gs GreenplumSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -80636,8 +87992,8 @@ func (gs *GreenplumSource) UnmarshalJSON(body []byte) error {
// GreenplumTableDataset greenplum Database dataset.
type GreenplumTableDataset struct {
- // GenericDatasetTypeProperties - Properties specific to this dataset type.
- *GenericDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // GreenplumDatasetTypeProperties - Properties specific to this dataset type.
+ *GreenplumDatasetTypeProperties `json:"typeProperties,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// Description - Dataset description.
@@ -80654,7 +88010,7 @@ type GreenplumTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -80662,8 +88018,8 @@ type GreenplumTableDataset struct {
func (gtd GreenplumTableDataset) MarshalJSON() ([]byte, error) {
gtd.Type = TypeGreenplumTable
objectMap := make(map[string]interface{})
- if gtd.GenericDatasetTypeProperties != nil {
- objectMap["typeProperties"] = gtd.GenericDatasetTypeProperties
+ if gtd.GreenplumDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = gtd.GreenplumDatasetTypeProperties
}
if gtd.Description != nil {
objectMap["description"] = gtd.Description
@@ -80790,6 +88146,11 @@ func (gtd GreenplumTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for GreenplumTableDataset.
+func (gtd GreenplumTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for GreenplumTableDataset.
func (gtd GreenplumTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -80915,6 +88276,16 @@ func (gtd GreenplumTableDataset) AsSapCloudForCustomerResourceDataset() (*SapClo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for GreenplumTableDataset.
+func (gtd GreenplumTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for GreenplumTableDataset.
+func (gtd GreenplumTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for GreenplumTableDataset.
func (gtd GreenplumTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -80955,6 +88326,16 @@ func (gtd GreenplumTableDataset) AsRelationalTableDataset() (*RelationalTableDat
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for GreenplumTableDataset.
+func (gtd GreenplumTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for GreenplumTableDataset.
+func (gtd GreenplumTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for GreenplumTableDataset.
func (gtd GreenplumTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -81070,6 +88451,11 @@ func (gtd GreenplumTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for GreenplumTableDataset.
+func (gtd GreenplumTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for GreenplumTableDataset.
func (gtd GreenplumTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -81080,6 +88466,11 @@ func (gtd GreenplumTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for GreenplumTableDataset.
+func (gtd GreenplumTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for GreenplumTableDataset.
func (gtd GreenplumTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -81106,12 +88497,12 @@ func (gtd *GreenplumTableDataset) UnmarshalJSON(body []byte) error {
switch k {
case "typeProperties":
if v != nil {
- var genericDatasetTypeProperties GenericDatasetTypeProperties
- err = json.Unmarshal(*v, &genericDatasetTypeProperties)
+ var greenplumDatasetTypeProperties GreenplumDatasetTypeProperties
+ err = json.Unmarshal(*v, &greenplumDatasetTypeProperties)
if err != nil {
return err
}
- gtd.GenericDatasetTypeProperties = &genericDatasetTypeProperties
+ gtd.GreenplumDatasetTypeProperties = &greenplumDatasetTypeProperties
}
default:
if v != nil {
@@ -81217,7 +88608,7 @@ type HBaseLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -81369,6 +88760,11 @@ func (hbls HBaseLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for HBaseLinkedService.
+func (hbls HBaseLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for HBaseLinkedService.
func (hbls HBaseLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -81949,7 +89345,7 @@ type HBaseObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -82085,6 +89481,11 @@ func (hbod HBaseObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for HBaseObjectDataset.
+func (hbod HBaseObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for HBaseObjectDataset.
func (hbod HBaseObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -82210,6 +89611,16 @@ func (hbod HBaseObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloud
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for HBaseObjectDataset.
+func (hbod HBaseObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for HBaseObjectDataset.
+func (hbod HBaseObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for HBaseObjectDataset.
func (hbod HBaseObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -82250,6 +89661,16 @@ func (hbod HBaseObjectDataset) AsRelationalTableDataset() (*RelationalTableDatas
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for HBaseObjectDataset.
+func (hbod HBaseObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for HBaseObjectDataset.
+func (hbod HBaseObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for HBaseObjectDataset.
func (hbod HBaseObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -82365,6 +89786,11 @@ func (hbod HBaseObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for HBaseObjectDataset.
+func (hbod HBaseObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for HBaseObjectDataset.
func (hbod HBaseObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -82375,6 +89801,11 @@ func (hbod HBaseObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for HBaseObjectDataset.
+func (hbod HBaseObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for HBaseObjectDataset.
func (hbod HBaseObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -82510,7 +89941,7 @@ type HBaseSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -82634,6 +90065,11 @@ func (hbs HBaseSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for HBaseSource.
+func (hbs HBaseSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for HBaseSource.
func (hbs HBaseSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -82854,6 +90290,11 @@ func (hbs HBaseSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for HBaseSource.
+func (hbs HBaseSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for HBaseSource.
func (hbs HBaseSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -82929,6 +90370,11 @@ func (hbs HBaseSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for HBaseSource.
+func (hbs HBaseSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for HBaseSource.
func (hbs HBaseSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -82939,6 +90385,11 @@ func (hbs HBaseSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for HBaseSource.
+func (hbs HBaseSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for HBaseSource.
func (hbs HBaseSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -83035,7 +90486,7 @@ type HdfsLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -83187,6 +90638,11 @@ func (hls HdfsLinkedService) AsMarketoLinkedService() (*MarketoLinkedService, bo
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for HdfsLinkedService.
+func (hls HdfsLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for HdfsLinkedService.
func (hls HdfsLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -83947,7 +91403,7 @@ type HdfsSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -84074,6 +91530,11 @@ func (hs HdfsSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for HdfsSource.
+func (hs HdfsSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for HdfsSource.
func (hs HdfsSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -84294,6 +91755,11 @@ func (hs HdfsSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for HdfsSource.
+func (hs HdfsSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for HdfsSource.
func (hs HdfsSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -84369,6 +91835,11 @@ func (hs HdfsSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for HdfsSource.
+func (hs HdfsSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for HdfsSource.
func (hs HdfsSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -84379,6 +91850,11 @@ func (hs HdfsSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for HdfsSource.
+func (hs HdfsSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for HdfsSource.
func (hs HdfsSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -84816,7 +92292,7 @@ type HDInsightHiveActivityTypeProperties struct {
StorageLinkedServices *[]LinkedServiceReference `json:"storageLinkedServices,omitempty"`
// Arguments - User specified arguments to HDInsightActivity.
Arguments *[]interface{} `json:"arguments,omitempty"`
- // GetDebugInfo - Debug info option. Possible values include: 'None', 'Always', 'Failure'
+ // GetDebugInfo - Debug info option. Possible values include: 'HDInsightActivityDebugInfoOptionNone', 'HDInsightActivityDebugInfoOptionAlways', 'HDInsightActivityDebugInfoOptionFailure'
GetDebugInfo HDInsightActivityDebugInfoOption `json:"getDebugInfo,omitempty"`
// ScriptPath - Script path. Type: string (or Expression with resultType string).
ScriptPath interface{} `json:"scriptPath,omitempty"`
@@ -84874,7 +92350,7 @@ type HDInsightLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -85026,6 +92502,11 @@ func (hils HDInsightLinkedService) AsMarketoLinkedService() (*MarketoLinkedServi
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for HDInsightLinkedService.
+func (hils HDInsightLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for HDInsightLinkedService.
func (hils HDInsightLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -85899,7 +93380,7 @@ type HDInsightMapReduceActivityTypeProperties struct {
StorageLinkedServices *[]LinkedServiceReference `json:"storageLinkedServices,omitempty"`
// Arguments - User specified arguments to HDInsightActivity.
Arguments *[]interface{} `json:"arguments,omitempty"`
- // GetDebugInfo - Debug info option. Possible values include: 'None', 'Always', 'Failure'
+ // GetDebugInfo - Debug info option. Possible values include: 'HDInsightActivityDebugInfoOptionNone', 'HDInsightActivityDebugInfoOptionAlways', 'HDInsightActivityDebugInfoOptionFailure'
GetDebugInfo HDInsightActivityDebugInfoOption `json:"getDebugInfo,omitempty"`
// ClassName - Class name. Type: string (or Expression with resultType string).
ClassName interface{} `json:"className,omitempty"`
@@ -85957,7 +93438,7 @@ type HDInsightOnDemandLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -86109,6 +93590,11 @@ func (hiodls HDInsightOnDemandLinkedService) AsMarketoLinkedService() (*MarketoL
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for HDInsightOnDemandLinkedService.
+func (hiodls HDInsightOnDemandLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for HDInsightOnDemandLinkedService.
func (hiodls HDInsightOnDemandLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -87255,7 +94741,7 @@ type HDInsightPigActivityTypeProperties struct {
StorageLinkedServices *[]LinkedServiceReference `json:"storageLinkedServices,omitempty"`
// Arguments - User specified arguments to HDInsightActivity.
Arguments *[]interface{} `json:"arguments,omitempty"`
- // GetDebugInfo - Debug info option. Possible values include: 'None', 'Always', 'Failure'
+ // GetDebugInfo - Debug info option. Possible values include: 'HDInsightActivityDebugInfoOptionNone', 'HDInsightActivityDebugInfoOptionAlways', 'HDInsightActivityDebugInfoOptionFailure'
GetDebugInfo HDInsightActivityDebugInfoOption `json:"getDebugInfo,omitempty"`
// ScriptPath - Script path. Type: string (or Expression with resultType string).
ScriptPath interface{} `json:"scriptPath,omitempty"`
@@ -87637,7 +95123,7 @@ type HDInsightSparkActivityTypeProperties struct {
EntryFilePath interface{} `json:"entryFilePath,omitempty"`
// Arguments - The user-specified arguments to HDInsightSparkActivity.
Arguments *[]interface{} `json:"arguments,omitempty"`
- // GetDebugInfo - Debug info option. Possible values include: 'None', 'Always', 'Failure'
+ // GetDebugInfo - Debug info option. Possible values include: 'HDInsightActivityDebugInfoOptionNone', 'HDInsightActivityDebugInfoOptionAlways', 'HDInsightActivityDebugInfoOptionFailure'
GetDebugInfo HDInsightActivityDebugInfoOption `json:"getDebugInfo,omitempty"`
// SparkJobLinkedService - The storage linked service for uploading the entry file and dependencies, and for receiving logs.
SparkJobLinkedService *LinkedServiceReference `json:"sparkJobLinkedService,omitempty"`
@@ -88025,7 +95511,7 @@ type HDInsightStreamingActivityTypeProperties struct {
StorageLinkedServices *[]LinkedServiceReference `json:"storageLinkedServices,omitempty"`
// Arguments - User specified arguments to HDInsightActivity.
Arguments *[]interface{} `json:"arguments,omitempty"`
- // GetDebugInfo - Debug info option. Possible values include: 'None', 'Always', 'Failure'
+ // GetDebugInfo - Debug info option. Possible values include: 'HDInsightActivityDebugInfoOptionNone', 'HDInsightActivityDebugInfoOptionAlways', 'HDInsightActivityDebugInfoOptionFailure'
GetDebugInfo HDInsightActivityDebugInfoOption `json:"getDebugInfo,omitempty"`
// Mapper - Mapper executable name. Type: string (or Expression with resultType string).
Mapper interface{} `json:"mapper,omitempty"`
@@ -88089,6 +95575,16 @@ func (hisatp HDInsightStreamingActivityTypeProperties) MarshalJSON() ([]byte, er
return json.Marshal(objectMap)
}
+// HiveDatasetTypeProperties hive Properties
+type HiveDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Table - The table name of the Hive. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Schema - The schema name of the Hive. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+}
+
// HiveLinkedService hive Server linked service.
type HiveLinkedService struct {
// HiveLinkedServiceTypeProperties - Hive Server linked service properties.
@@ -88103,7 +95599,7 @@ type HiveLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -88255,6 +95751,11 @@ func (hls HiveLinkedService) AsMarketoLinkedService() (*MarketoLinkedService, bo
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for HiveLinkedService.
+func (hls HiveLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for HiveLinkedService.
func (hls HiveLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -88883,8 +96384,8 @@ func (hlstp *HiveLinkedServiceTypeProperties) UnmarshalJSON(body []byte) error {
// HiveObjectDataset hive Server dataset.
type HiveObjectDataset struct {
- // GenericDatasetTypeProperties - Properties specific to this dataset type.
- *GenericDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // HiveDatasetTypeProperties - Properties specific to this dataset type.
+ *HiveDatasetTypeProperties `json:"typeProperties,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized this collection
AdditionalProperties map[string]interface{} `json:""`
// Description - Dataset description.
@@ -88901,7 +96402,7 @@ type HiveObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -88909,8 +96410,8 @@ type HiveObjectDataset struct {
func (hod HiveObjectDataset) MarshalJSON() ([]byte, error) {
hod.Type = TypeHiveObject
objectMap := make(map[string]interface{})
- if hod.GenericDatasetTypeProperties != nil {
- objectMap["typeProperties"] = hod.GenericDatasetTypeProperties
+ if hod.HiveDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = hod.HiveDatasetTypeProperties
}
if hod.Description != nil {
objectMap["description"] = hod.Description
@@ -89037,6 +96538,11 @@ func (hod HiveObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bo
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for HiveObjectDataset.
+func (hod HiveObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for HiveObjectDataset.
func (hod HiveObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -89162,6 +96668,16 @@ func (hod HiveObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudFo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for HiveObjectDataset.
+func (hod HiveObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for HiveObjectDataset.
+func (hod HiveObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for HiveObjectDataset.
func (hod HiveObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -89202,6 +96718,16 @@ func (hod HiveObjectDataset) AsRelationalTableDataset() (*RelationalTableDataset
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for HiveObjectDataset.
+func (hod HiveObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for HiveObjectDataset.
+func (hod HiveObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for HiveObjectDataset.
func (hod HiveObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -89317,6 +96843,11 @@ func (hod HiveObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for HiveObjectDataset.
+func (hod HiveObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for HiveObjectDataset.
func (hod HiveObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -89327,6 +96858,11 @@ func (hod HiveObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for HiveObjectDataset.
+func (hod HiveObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for HiveObjectDataset.
func (hod HiveObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -89353,12 +96889,12 @@ func (hod *HiveObjectDataset) UnmarshalJSON(body []byte) error {
switch k {
case "typeProperties":
if v != nil {
- var genericDatasetTypeProperties GenericDatasetTypeProperties
- err = json.Unmarshal(*v, &genericDatasetTypeProperties)
+ var hiveDatasetTypeProperties HiveDatasetTypeProperties
+ err = json.Unmarshal(*v, &hiveDatasetTypeProperties)
if err != nil {
return err
}
- hod.GenericDatasetTypeProperties = &genericDatasetTypeProperties
+ hod.HiveDatasetTypeProperties = &hiveDatasetTypeProperties
}
default:
if v != nil {
@@ -89462,7 +96998,7 @@ type HiveSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -89586,6 +97122,11 @@ func (hs HiveSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for HiveSource.
+func (hs HiveSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for HiveSource.
func (hs HiveSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -89806,6 +97347,11 @@ func (hs HiveSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for HiveSource.
+func (hs HiveSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for HiveSource.
func (hs HiveSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -89881,6 +97427,11 @@ func (hs HiveSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for HiveSource.
+func (hs HiveSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for HiveSource.
func (hs HiveSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -89891,6 +97442,11 @@ func (hs HiveSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for HiveSource.
+func (hs HiveSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for HiveSource.
func (hs HiveSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -89993,7 +97549,7 @@ type HTTPDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -90129,6 +97685,11 @@ func (hd HTTPDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool) {
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for HTTPDataset.
+func (hd HTTPDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for HTTPDataset.
func (hd HTTPDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -90254,6 +97815,16 @@ func (hd HTTPDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCustom
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for HTTPDataset.
+func (hd HTTPDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for HTTPDataset.
+func (hd HTTPDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for HTTPDataset.
func (hd HTTPDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -90294,6 +97865,16 @@ func (hd HTTPDataset) AsRelationalTableDataset() (*RelationalTableDataset, bool)
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for HTTPDataset.
+func (hd HTTPDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for HTTPDataset.
+func (hd HTTPDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for HTTPDataset.
func (hd HTTPDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -90409,6 +97990,11 @@ func (hd HTTPDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for HTTPDataset.
+func (hd HTTPDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for HTTPDataset.
func (hd HTTPDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -90419,6 +98005,11 @@ func (hd HTTPDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for HTTPDataset.
+func (hd HTTPDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for HTTPDataset.
func (hd HTTPDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -90641,7 +98232,7 @@ type HTTPLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -90793,6 +98384,11 @@ func (hls HTTPLinkedService) AsMarketoLinkedService() (*MarketoLinkedService, bo
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for HTTPLinkedService.
+func (hls HTTPLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for HTTPLinkedService.
func (hls HTTPLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -91556,7 +99152,7 @@ type HTTPSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -91680,6 +99276,11 @@ func (hs HTTPSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for HTTPSource.
+func (hs HTTPSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for HTTPSource.
func (hs HTTPSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -91900,6 +99501,11 @@ func (hs HTTPSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for HTTPSource.
+func (hs HTTPSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for HTTPSource.
func (hs HTTPSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -91975,6 +99581,11 @@ func (hs HTTPSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for HTTPSource.
+func (hs HTTPSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for HTTPSource.
func (hs HTTPSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -91985,6 +99596,11 @@ func (hs HTTPSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for HTTPSource.
+func (hs HTTPSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for HTTPSource.
func (hs HTTPSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -92081,7 +99697,7 @@ type HubspotLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -92233,6 +99849,11 @@ func (hls HubspotLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for HubspotLinkedService.
+func (hls HubspotLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for HubspotLinkedService.
func (hls HubspotLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -92778,7 +100399,7 @@ type HubspotObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -92914,6 +100535,11 @@ func (hod HubspotObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for HubspotObjectDataset.
+func (hod HubspotObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for HubspotObjectDataset.
func (hod HubspotObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -93039,6 +100665,16 @@ func (hod HubspotObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapClou
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for HubspotObjectDataset.
+func (hod HubspotObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for HubspotObjectDataset.
+func (hod HubspotObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for HubspotObjectDataset.
func (hod HubspotObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -93079,6 +100715,16 @@ func (hod HubspotObjectDataset) AsRelationalTableDataset() (*RelationalTableData
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for HubspotObjectDataset.
+func (hod HubspotObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for HubspotObjectDataset.
+func (hod HubspotObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for HubspotObjectDataset.
func (hod HubspotObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -93194,6 +100840,11 @@ func (hod HubspotObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for HubspotObjectDataset.
+func (hod HubspotObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for HubspotObjectDataset.
func (hod HubspotObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -93204,6 +100855,11 @@ func (hod HubspotObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for HubspotObjectDataset.
+func (hod HubspotObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for HubspotObjectDataset.
func (hod HubspotObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -93339,7 +100995,7 @@ type HubspotSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -93463,6 +101119,11 @@ func (hs HubspotSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for HubspotSource.
+func (hs HubspotSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for HubspotSource.
func (hs HubspotSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -93683,6 +101344,11 @@ func (hs HubspotSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for HubspotSource.
+func (hs HubspotSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for HubspotSource.
func (hs HubspotSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -93758,6 +101424,11 @@ func (hs HubspotSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for HubspotSource.
+func (hs HubspotSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for HubspotSource.
func (hs HubspotSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -93768,6 +101439,11 @@ func (hs HubspotSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for HubspotSource.
+func (hs HubspotSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for HubspotSource.
func (hs HubspotSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -94214,6 +101890,16 @@ func (icatp *IfConditionActivityTypeProperties) UnmarshalJSON(body []byte) error
return nil
}
+// ImpalaDatasetTypeProperties Impala dataset properties.
+type ImpalaDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Table - The table name of the Impala server. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Schema - The schema name of the Impala server. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+}
+
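With this release, `ImpalaObjectDataset` carries these Impala-specific type properties instead of the generic property bag (see the updated struct and `MarshalJSON` further below). A minimal sketch of constructing and serializing a dataset with the new `Table`/`Schema` fields follows; the import path is assumed and the literal values are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path for the datafactory 2018-06-01 package.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	ds := datafactory.ImpalaObjectDataset{
		// The dataset now embeds Impala-specific type properties rather than the
		// generic table-name-only bag; "exampleSchema"/"exampleTable" are placeholders.
		ImpalaDatasetTypeProperties: &datafactory.ImpalaDatasetTypeProperties{
			Schema: "exampleSchema",
			Table:  "exampleTable",
		},
	}

	// MarshalJSON stamps "type": "ImpalaObject" and nests the fields above under
	// the "typeProperties" key.
	b, err := json.Marshal(ds)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```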
// ImpalaLinkedService Impala server linked service.
type ImpalaLinkedService struct {
// ImpalaLinkedServiceTypeProperties - Impala server linked service properties.
@@ -94228,7 +101914,7 @@ type ImpalaLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -94380,6 +102066,11 @@ func (ils ImpalaLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for ImpalaLinkedService.
+func (ils ImpalaLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for ImpalaLinkedService.
func (ils ImpalaLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -94942,8 +102633,8 @@ func (ilstp *ImpalaLinkedServiceTypeProperties) UnmarshalJSON(body []byte) error
// ImpalaObjectDataset Impala server dataset.
type ImpalaObjectDataset struct {
- // GenericDatasetTypeProperties - Properties specific to this dataset type.
- *GenericDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // ImpalaDatasetTypeProperties - Properties specific to this dataset type.
+ *ImpalaDatasetTypeProperties `json:"typeProperties,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized this collection
AdditionalProperties map[string]interface{} `json:""`
// Description - Dataset description.
@@ -94960,7 +102651,7 @@ type ImpalaObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -94968,8 +102659,8 @@ type ImpalaObjectDataset struct {
func (iod ImpalaObjectDataset) MarshalJSON() ([]byte, error) {
iod.Type = TypeImpalaObject
objectMap := make(map[string]interface{})
- if iod.GenericDatasetTypeProperties != nil {
- objectMap["typeProperties"] = iod.GenericDatasetTypeProperties
+ if iod.ImpalaDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = iod.ImpalaDatasetTypeProperties
}
if iod.Description != nil {
objectMap["description"] = iod.Description
@@ -95096,6 +102787,11 @@ func (iod ImpalaObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for ImpalaObjectDataset.
+func (iod ImpalaObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for ImpalaObjectDataset.
func (iod ImpalaObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -95221,6 +102917,16 @@ func (iod ImpalaObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloud
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for ImpalaObjectDataset.
+func (iod ImpalaObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for ImpalaObjectDataset.
+func (iod ImpalaObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for ImpalaObjectDataset.
func (iod ImpalaObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -95261,6 +102967,16 @@ func (iod ImpalaObjectDataset) AsRelationalTableDataset() (*RelationalTableDatas
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for ImpalaObjectDataset.
+func (iod ImpalaObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for ImpalaObjectDataset.
+func (iod ImpalaObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for ImpalaObjectDataset.
func (iod ImpalaObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -95376,6 +103092,11 @@ func (iod ImpalaObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for ImpalaObjectDataset.
+func (iod ImpalaObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for ImpalaObjectDataset.
func (iod ImpalaObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -95386,6 +103107,11 @@ func (iod ImpalaObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for ImpalaObjectDataset.
+func (iod ImpalaObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for ImpalaObjectDataset.
func (iod ImpalaObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
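Because `ImpalaObjectDataset`, like every concrete dataset, implements the full set of `BasicDataset` accessors (including the new `AsAzureMariaDBTableDataset` above), callers can recover the typed dataset and its new properties from the interface. A short sketch, with an assumed import path and placeholder values:

```go
package main

import (
	"fmt"

	// Assumed import path for the datafactory 2018-06-01 package.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	var ds datafactory.BasicDataset = datafactory.ImpalaObjectDataset{
		ImpalaDatasetTypeProperties: &datafactory.ImpalaDatasetTypeProperties{
			Table: "exampleTable", // placeholder value
		},
	}

	// Only the accessor matching the concrete type reports ok == true.
	if impala, ok := ds.AsImpalaObjectDataset(); ok {
		fmt.Println("Impala table:", impala.ImpalaDatasetTypeProperties.Table)
	}
	if _, ok := ds.AsAzureMariaDBTableDataset(); !ok {
		fmt.Println("not an Azure MariaDB table dataset")
	}
}
```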
@@ -95412,12 +103138,12 @@ func (iod *ImpalaObjectDataset) UnmarshalJSON(body []byte) error {
switch k {
case "typeProperties":
if v != nil {
- var genericDatasetTypeProperties GenericDatasetTypeProperties
- err = json.Unmarshal(*v, &genericDatasetTypeProperties)
+ var impalaDatasetTypeProperties ImpalaDatasetTypeProperties
+ err = json.Unmarshal(*v, &impalaDatasetTypeProperties)
if err != nil {
return err
}
- iod.GenericDatasetTypeProperties = &genericDatasetTypeProperties
+ iod.ImpalaDatasetTypeProperties = &impalaDatasetTypeProperties
}
default:
if v != nil {
@@ -95521,7 +103247,7 @@ type ImpalaSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -95645,6 +103371,11 @@ func (is ImpalaSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for ImpalaSource.
+func (is ImpalaSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for ImpalaSource.
func (is ImpalaSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -95865,6 +103596,11 @@ func (is ImpalaSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for ImpalaSource.
+func (is ImpalaSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for ImpalaSource.
func (is ImpalaSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -95940,6 +103676,11 @@ func (is ImpalaSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for ImpalaSource.
+func (is ImpalaSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for ImpalaSource.
func (is ImpalaSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -95950,6 +103691,11 @@ func (is ImpalaSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for ImpalaSource.
+func (is ImpalaSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for ImpalaSource.
func (is ImpalaSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -96046,7 +103792,7 @@ type InformixLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -96198,6 +103944,11 @@ func (ils InformixLinkedService) AsMarketoLinkedService() (*MarketoLinkedService
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for InformixLinkedService.
+func (ils InformixLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for InformixLinkedService.
func (ils InformixLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -96718,7 +104469,7 @@ type InformixSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -96873,6 +104624,11 @@ func (is InformixSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for InformixSink.
+func (is InformixSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for InformixSink.
func (is InformixSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -96888,11 +104644,21 @@ func (is InformixSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bo
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for InformixSink.
+func (is InformixSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for InformixSink.
func (is InformixSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for InformixSink.
+func (is InformixSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for InformixSink.
func (is InformixSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
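The sink side gains the same kind of accessors (`AsAvroSink`, `AsAzureMySQLSink`, `AsJSONSink` above). A minimal sketch of probing a `BasicCopySink` for the newly added sink types; the import path and the `sinkKind` helper are assumptions for illustration only.

```go
package main

import (
	"fmt"

	// Assumed import path for the datafactory 2018-06-01 package.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// sinkKind reports which concrete copy sink a BasicCopySink value holds by
// trying the generated accessors in turn.
func sinkKind(sink datafactory.BasicCopySink) string {
	if _, ok := sink.AsAzureMySQLSink(); ok {
		return "AzureMySQLSink"
	}
	if _, ok := sink.AsJSONSink(); ok {
		return "JSONSink"
	}
	if _, ok := sink.AsAvroSink(); ok {
		return "AvroSink"
	}
	if _, ok := sink.AsInformixSink(); ok {
		return "InformixSink"
	}
	return "other sink"
}

func main() {
	fmt.Println(sinkKind(datafactory.InformixSink{}))   // prints "InformixSink"
	fmt.Println(sinkKind(datafactory.AzureMySQLSink{})) // prints "AzureMySQLSink"
}
```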
@@ -97010,7 +104776,7 @@ type InformixSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -97134,6 +104900,11 @@ func (is InformixSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for InformixSource.
+func (is InformixSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for InformixSource.
func (is InformixSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -97354,6 +105125,11 @@ func (is InformixSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for InformixSource.
+func (is InformixSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for InformixSource.
func (is InformixSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -97429,6 +105205,11 @@ func (is InformixSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for InformixSource.
+func (is InformixSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for InformixSource.
func (is InformixSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -97439,6 +105220,11 @@ func (is InformixSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for InformixSource.
+func (is InformixSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for InformixSource.
func (is InformixSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -97541,7 +105327,7 @@ type InformixTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -97677,6 +105463,11 @@ func (itd InformixTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for InformixTableDataset.
+func (itd InformixTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for InformixTableDataset.
func (itd InformixTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -97802,6 +105593,16 @@ func (itd InformixTableDataset) AsSapCloudForCustomerResourceDataset() (*SapClou
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for InformixTableDataset.
+func (itd InformixTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for InformixTableDataset.
+func (itd InformixTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for InformixTableDataset.
func (itd InformixTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -97842,6 +105643,16 @@ func (itd InformixTableDataset) AsRelationalTableDataset() (*RelationalTableData
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for InformixTableDataset.
+func (itd InformixTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for InformixTableDataset.
+func (itd InformixTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for InformixTableDataset.
func (itd InformixTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -97957,6 +105768,11 @@ func (itd InformixTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for InformixTableDataset.
+func (itd InformixTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for InformixTableDataset.
func (itd InformixTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -97967,6 +105783,11 @@ func (itd InformixTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for InformixTableDataset.
+func (itd InformixTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for InformixTableDataset.
func (itd InformixTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -99454,7 +107275,7 @@ type JiraLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -99606,6 +107427,11 @@ func (jls JiraLinkedService) AsMarketoLinkedService() (*MarketoLinkedService, bo
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for JiraLinkedService.
+func (jls JiraLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for JiraLinkedService.
func (jls JiraLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -100153,7 +107979,7 @@ type JiraObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -100289,6 +108115,11 @@ func (jod JiraObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bo
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for JiraObjectDataset.
+func (jod JiraObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for JiraObjectDataset.
func (jod JiraObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -100414,6 +108245,16 @@ func (jod JiraObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudFo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for JiraObjectDataset.
+func (jod JiraObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for JiraObjectDataset.
+func (jod JiraObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for JiraObjectDataset.
func (jod JiraObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -100454,6 +108295,16 @@ func (jod JiraObjectDataset) AsRelationalTableDataset() (*RelationalTableDataset
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for JiraObjectDataset.
+func (jod JiraObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for JiraObjectDataset.
+func (jod JiraObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for JiraObjectDataset.
func (jod JiraObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -100569,6 +108420,11 @@ func (jod JiraObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for JiraObjectDataset.
+func (jod JiraObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for JiraObjectDataset.
func (jod JiraObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -100579,6 +108435,11 @@ func (jod JiraObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for JiraObjectDataset.
+func (jod JiraObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for JiraObjectDataset.
func (jod JiraObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -100714,7 +108575,7 @@ type JiraSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -100838,6 +108699,11 @@ func (js JiraSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for JiraSource.
+func (js JiraSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for JiraSource.
func (js JiraSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -101058,6 +108924,11 @@ func (js JiraSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for JiraSource.
+func (js JiraSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for JiraSource.
func (js JiraSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -101133,6 +109004,11 @@ func (js JiraSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for JiraSource.
+func (js JiraSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for JiraSource.
func (js JiraSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -101143,6 +109019,11 @@ func (js JiraSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for JiraSource.
+func (js JiraSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for JiraSource.
func (js JiraSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -101225,6 +109106,661 @@ func (js *JiraSource) UnmarshalJSON(body []byte) error {
return nil
}
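// Illustrative sketch, not part of the generated diff: the As* helpers added in
// this change implement the BasicDataset discriminated union. Every concrete
// dataset type answers every AsXxxDataset question and returns (nil, false) for
// all but its own kind, so a caller holding the BasicDataset interface narrows
// it as below. The package alias "datafactory" and the describeDataset helper
// are assumptions for illustration only.
func describeDataset(d datafactory.BasicDataset) string {
	if jd, ok := d.AsJSONDataset(); ok && jd.Description != nil {
		return "json dataset: " + *jd.Description
	}
	if _, ok := d.AsDelimitedTextDataset(); ok {
		return "delimited text dataset"
	}
	return "some other dataset kind"
}
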
+// JSONDataset json dataset.
+type JSONDataset struct {
+ // JSONDatasetTypeProperties - Json dataset properties.
+ *JSONDatasetTypeProperties `json:"typeProperties,omitempty"`
+	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Description - Dataset description.
+ Description *string `json:"description,omitempty"`
+ // Structure - Columns that define the structure of the dataset. Type: array (or Expression with resultType array), itemType: DatasetDataElement.
+ Structure interface{} `json:"structure,omitempty"`
+ // Schema - Columns that define the physical type schema of the dataset. Type: array (or Expression with resultType array), itemType: DatasetSchemaDataElement.
+ Schema interface{} `json:"schema,omitempty"`
+ // LinkedServiceName - Linked service reference.
+ LinkedServiceName *LinkedServiceReference `json:"linkedServiceName,omitempty"`
+ // Parameters - Parameters for dataset.
+ Parameters map[string]*ParameterSpecification `json:"parameters"`
+ // Annotations - List of tags that can be used for describing the Dataset.
+ Annotations *[]interface{} `json:"annotations,omitempty"`
+ // Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
+ Folder *DatasetFolder `json:"folder,omitempty"`
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
+ Type TypeBasicDataset `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for JSONDataset.
+func (jd JSONDataset) MarshalJSON() ([]byte, error) {
+ jd.Type = TypeJSON
+ objectMap := make(map[string]interface{})
+ if jd.JSONDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = jd.JSONDatasetTypeProperties
+ }
+ if jd.Description != nil {
+ objectMap["description"] = jd.Description
+ }
+ if jd.Structure != nil {
+ objectMap["structure"] = jd.Structure
+ }
+ if jd.Schema != nil {
+ objectMap["schema"] = jd.Schema
+ }
+ if jd.LinkedServiceName != nil {
+ objectMap["linkedServiceName"] = jd.LinkedServiceName
+ }
+ if jd.Parameters != nil {
+ objectMap["parameters"] = jd.Parameters
+ }
+ if jd.Annotations != nil {
+ objectMap["annotations"] = jd.Annotations
+ }
+ if jd.Folder != nil {
+ objectMap["folder"] = jd.Folder
+ }
+ if jd.Type != "" {
+ objectMap["type"] = jd.Type
+ }
+ for k, v := range jd.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsGoogleAdWordsObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsGoogleAdWordsObjectDataset() (*GoogleAdWordsObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzureDataExplorerTableDataset() (*AzureDataExplorerTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsOracleServiceCloudObjectDataset() (*OracleServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXResourceDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsDynamicsAXResourceDataset() (*DynamicsAXResourceDataset, bool) {
+ return nil, false
+}
+
+// AsResponsysObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsResponsysObjectDataset() (*ResponsysObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSalesforceMarketingCloudObjectDataset() (*SalesforceMarketingCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsVerticaTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsVerticaTableDataset() (*VerticaTableDataset, bool) {
+ return nil, false
+}
+
+// AsNetezzaTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsNetezzaTableDataset() (*NetezzaTableDataset, bool) {
+ return nil, false
+}
+
+// AsZohoObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsZohoObjectDataset() (*ZohoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsXeroObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsXeroObjectDataset() (*XeroObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSquareObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSquareObjectDataset() (*SquareObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSparkObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSparkObjectDataset() (*SparkObjectDataset, bool) {
+ return nil, false
+}
+
+// AsShopifyObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsShopifyObjectDataset() (*ShopifyObjectDataset, bool) {
+ return nil, false
+}
+
+// AsServiceNowObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsServiceNowObjectDataset() (*ServiceNowObjectDataset, bool) {
+ return nil, false
+}
+
+// AsQuickBooksObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsQuickBooksObjectDataset() (*QuickBooksObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPrestoObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsPrestoObjectDataset() (*PrestoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPhoenixObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsPhoenixObjectDataset() (*PhoenixObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPaypalObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsPaypalObjectDataset() (*PaypalObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMarketoObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMariaDBTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMagentoObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsMagentoObjectDataset() (*MagentoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsJiraObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsJiraObjectDataset() (*JiraObjectDataset, bool) {
+ return nil, false
+}
+
+// AsImpalaObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsImpalaObjectDataset() (*ImpalaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHubspotObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsHubspotObjectDataset() (*HubspotObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHiveObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsHiveObjectDataset() (*HiveObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHBaseObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsHBaseObjectDataset() (*HBaseObjectDataset, bool) {
+ return nil, false
+}
+
+// AsGreenplumTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsGreenplumTableDataset() (*GreenplumTableDataset, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQueryObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsGoogleBigQueryObjectDataset() (*GoogleBigQueryObjectDataset, bool) {
+ return nil, false
+}
+
+// AsEloquaObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsEloquaObjectDataset() (*EloquaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDrillTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsDrillTableDataset() (*DrillTableDataset, bool) {
+ return nil, false
+}
+
+// AsCouchbaseTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsCouchbaseTableDataset() (*CouchbaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsConcurObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsConcurObjectDataset() (*ConcurObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzurePostgreSQLTableDataset() (*AzurePostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAmazonMWSObjectDataset() (*AmazonMWSObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHTTPDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsHTTPDataset() (*HTTPDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSearchIndexDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzureSearchIndexDataset() (*AzureSearchIndexDataset, bool) {
+ return nil, false
+}
+
+// AsWebTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsWebTableDataset() (*WebTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapTableResourceDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSapTableResourceDataset() (*SapTableResourceDataset, bool) {
+ return nil, false
+}
+
+// AsRestResourceDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsRestResourceDataset() (*RestResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSQLServerTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSQLServerTableDataset() (*SQLServerTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSapOpenHubTableDataset() (*SapOpenHubTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapHanaTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSapHanaTableDataset() (*SapHanaTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapEccResourceDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSapEccResourceDataset() (*SapEccResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerResourceDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCustomerResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapBwCubeDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceObjectDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsSalesforceObjectDataset() (*SalesforceObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsMicrosoftAccessTableDataset() (*MicrosoftAccessTableDataset, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsPostgreSQLTableDataset() (*PostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsMySQLTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsMySQLTableDataset() (*MySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsOdbcTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsOdbcTableDataset() (*OdbcTableDataset, bool) {
+ return nil, false
+}
+
+// AsInformixTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsInformixTableDataset() (*InformixTableDataset, bool) {
+ return nil, false
+}
+
+// AsRelationalTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsRelationalTableDataset() (*RelationalTableDataset, bool) {
+ return nil, false
+}
+
+// AsDb2TableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsTeradataTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsTeradataTableDataset() (*TeradataTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsOracleTableDataset() (*OracleTableDataset, bool) {
+ return nil, false
+}
+
+// AsODataResourceDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsODataResourceDataset() (*ODataResourceDataset, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPICollectionDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsCosmosDbMongoDbAPICollectionDataset() (*CosmosDbMongoDbAPICollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2CollectionDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsMongoDbV2CollectionDataset() (*MongoDbV2CollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbCollectionDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsMongoDbCollectionDataset() (*MongoDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsFileShareDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsFileShareDataset() (*FileShareDataset, bool) {
+ return nil, false
+}
+
+// AsOffice365Dataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsOffice365Dataset() (*Office365Dataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzureBlobFSDataset() (*AzureBlobFSDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzureDataLakeStoreDataset() (*AzureDataLakeStoreDataset, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsEntityDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsCommonDataServiceForAppsEntityDataset() (*CommonDataServiceForAppsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmEntityDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsDynamicsCrmEntityDataset() (*DynamicsCrmEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsEntityDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsDynamicsEntityDataset() (*DynamicsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsDocumentDbCollectionDataset() (*DocumentDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsCustomDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsCustomDataset() (*CustomDataset, bool) {
+ return nil, false
+}
+
+// AsCassandraTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsCassandraTableDataset() (*CassandraTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLDWTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzureSQLDWTableDataset() (*AzureSQLDWTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLMITableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzureSQLMITableDataset() (*AzureSQLMITableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzureSQLTableDataset() (*AzureSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureTableDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzureTableDataset() (*AzureTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAzureBlobDataset() (*AzureBlobDataset, bool) {
+ return nil, false
+}
+
+// AsBinaryDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsBinaryDataset() (*BinaryDataset, bool) {
+ return nil, false
+}
+
+// AsJSONDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return &jd, true
+}
+
+// AsDelimitedTextDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
+ return nil, false
+}
+
+// AsParquetDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsParquetDataset() (*ParquetDataset, bool) {
+ return nil, false
+}
+
+// AsAvroDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonS3Dataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
+ return nil, false
+}
+
+// AsDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsDataset() (*Dataset, bool) {
+ return nil, false
+}
+
+// AsBasicDataset is the BasicDataset implementation for JSONDataset.
+func (jd JSONDataset) AsBasicDataset() (BasicDataset, bool) {
+ return &jd, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for JSONDataset struct.
+func (jd *JSONDataset) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "typeProperties":
+ if v != nil {
+ var JSONDatasetTypeProperties JSONDatasetTypeProperties
+ err = json.Unmarshal(*v, &JSONDatasetTypeProperties)
+ if err != nil {
+ return err
+ }
+ jd.JSONDatasetTypeProperties = &JSONDatasetTypeProperties
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if jd.AdditionalProperties == nil {
+ jd.AdditionalProperties = make(map[string]interface{})
+ }
+ jd.AdditionalProperties[k] = additionalProperties
+ }
+ case "description":
+ if v != nil {
+ var description string
+ err = json.Unmarshal(*v, &description)
+ if err != nil {
+ return err
+ }
+ jd.Description = &description
+ }
+ case "structure":
+ if v != nil {
+ var structure interface{}
+ err = json.Unmarshal(*v, &structure)
+ if err != nil {
+ return err
+ }
+ jd.Structure = structure
+ }
+ case "schema":
+ if v != nil {
+ var schema interface{}
+ err = json.Unmarshal(*v, &schema)
+ if err != nil {
+ return err
+ }
+ jd.Schema = schema
+ }
+ case "linkedServiceName":
+ if v != nil {
+ var linkedServiceName LinkedServiceReference
+ err = json.Unmarshal(*v, &linkedServiceName)
+ if err != nil {
+ return err
+ }
+ jd.LinkedServiceName = &linkedServiceName
+ }
+ case "parameters":
+ if v != nil {
+ var parameters map[string]*ParameterSpecification
+				err = json.Unmarshal(*v, &parameters)
+ if err != nil {
+ return err
+ }
+ jd.Parameters = parameters
+ }
+ case "annotations":
+ if v != nil {
+ var annotations []interface{}
+ err = json.Unmarshal(*v, &annotations)
+ if err != nil {
+ return err
+ }
+ jd.Annotations = &annotations
+ }
+ case "folder":
+ if v != nil {
+ var folder DatasetFolder
+ err = json.Unmarshal(*v, &folder)
+ if err != nil {
+ return err
+ }
+ jd.Folder = &folder
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicDataset
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ jd.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// JSONDatasetTypeProperties json dataset properties.
+type JSONDatasetTypeProperties struct {
+ // Location - The location of the json data storage.
+ Location *DatasetLocation `json:"location,omitempty"`
+ // EncodingName - The code page name of the preferred encoding. If not specified, the default value is UTF-8, unless BOM denotes another Unicode encoding. Refer to the name column of the table in the following link to set supported values: https://msdn.microsoft.com/library/system.text.encoding.aspx. Type: string (or Expression with resultType string).
+ EncodingName interface{} `json:"encodingName,omitempty"`
+ // Compression - The data compression method used for the json dataset.
+ Compression BasicDatasetCompression `json:"compression,omitempty"`
+}
+
+// UnmarshalJSON is the custom unmarshaler for JSONDatasetTypeProperties struct.
+func (jdtp *JSONDatasetTypeProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "location":
+ if v != nil {
+ var location DatasetLocation
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ jdtp.Location = &location
+ }
+ case "encodingName":
+ if v != nil {
+ var encodingName interface{}
+ err = json.Unmarshal(*v, &encodingName)
+ if err != nil {
+ return err
+ }
+ jdtp.EncodingName = encodingName
+ }
+ case "compression":
+ if v != nil {
+ compression, err := unmarshalBasicDatasetCompression(*v)
+ if err != nil {
+ return err
+ }
+ jdtp.Compression = compression
+ }
+ }
+ }
+
+ return nil
+}
+
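// Illustrative sketch, not part of the generated diff: constructing the new
// JSONDataset and encoding it with the custom marshaler defined above, which
// stamps the TypeJSON discriminator and copies any AdditionalProperties into
// the top-level payload. The import path and field values are assumptions
// based on the datafactory 2018-06-01 package updated in this release.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	desc := "nightly export"
	ds := datafactory.JSONDataset{
		Description: &desc,
		JSONDatasetTypeProperties: &datafactory.JSONDatasetTypeProperties{
			// EncodingName accepts a string or an Expression with resultType string.
			EncodingName: "UTF-8",
		},
	}
	payload, err := json.Marshal(ds) // dispatches to JSONDataset.MarshalJSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
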
// JSONFormat the data stored in JSON format.
type JSONFormat struct {
// FilePattern - File pattern of JSON. To be more specific, the way of separating a collection of JSON objects. The default value is 'setOfObjects'. It is case-sensitive.
@@ -101415,6 +109951,941 @@ func (jf *JSONFormat) UnmarshalJSON(body []byte) error {
return nil
}
+// JSONSink a copy activity Json sink.
+type JSONSink struct {
+ // StoreSettings - Json store settings.
+ StoreSettings BasicStoreWriteSettings `json:"storeSettings,omitempty"`
+ // FormatSettings - Json format settings.
+ FormatSettings *JSONWriteSettings `json:"formatSettings,omitempty"`
+	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // WriteBatchSize - Write batch size. Type: integer (or Expression with resultType integer), minimum: 0.
+ WriteBatchSize interface{} `json:"writeBatchSize,omitempty"`
+ // WriteBatchTimeout - Write batch timeout. Type: string (or Expression with resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ WriteBatchTimeout interface{} `json:"writeBatchTimeout,omitempty"`
+ // SinkRetryCount - Sink retry count. Type: integer (or Expression with resultType integer).
+ SinkRetryCount interface{} `json:"sinkRetryCount,omitempty"`
+ // SinkRetryWait - Sink retry wait. Type: string (or Expression with resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
+ // MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
+ MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
+ Type TypeBasicCopySink `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for JSONSink.
+func (js JSONSink) MarshalJSON() ([]byte, error) {
+ js.Type = TypeJSONSink
+ objectMap := make(map[string]interface{})
+ objectMap["storeSettings"] = js.StoreSettings
+ if js.FormatSettings != nil {
+ objectMap["formatSettings"] = js.FormatSettings
+ }
+ if js.WriteBatchSize != nil {
+ objectMap["writeBatchSize"] = js.WriteBatchSize
+ }
+ if js.WriteBatchTimeout != nil {
+ objectMap["writeBatchTimeout"] = js.WriteBatchTimeout
+ }
+ if js.SinkRetryCount != nil {
+ objectMap["sinkRetryCount"] = js.SinkRetryCount
+ }
+ if js.SinkRetryWait != nil {
+ objectMap["sinkRetryWait"] = js.SinkRetryWait
+ }
+ if js.MaxConcurrentConnections != nil {
+ objectMap["maxConcurrentConnections"] = js.MaxConcurrentConnections
+ }
+ if js.Type != "" {
+ objectMap["type"] = js.Type
+ }
+ for k, v := range js.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsCosmosDbMongoDbAPISink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsCosmosDbMongoDbAPISink() (*CosmosDbMongoDbAPISink, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsSalesforceServiceCloudSink() (*SalesforceServiceCloudSink, bool) {
+ return nil, false
+}
+
+// AsSalesforceSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsSalesforceSink() (*SalesforceSink, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsAzureDataExplorerSink() (*AzureDataExplorerSink, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsCommonDataServiceForAppsSink() (*CommonDataServiceForAppsSink, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsDynamicsCrmSink() (*DynamicsCrmSink, bool) {
+ return nil, false
+}
+
+// AsDynamicsSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsDynamicsSink() (*DynamicsSink, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsMicrosoftAccessSink() (*MicrosoftAccessSink, bool) {
+ return nil, false
+}
+
+// AsInformixSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsInformixSink() (*InformixSink, bool) {
+ return nil, false
+}
+
+// AsOdbcSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsOdbcSink() (*OdbcSink, bool) {
+ return nil, false
+}
+
+// AsAzureSearchIndexSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsAzureSearchIndexSink() (*AzureSearchIndexSink, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsAzureBlobFSSink() (*AzureBlobFSSink, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsAzureDataLakeStoreSink() (*AzureDataLakeStoreSink, bool) {
+ return nil, false
+}
+
+// AsOracleSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsOracleSink() (*OracleSink, bool) {
+ return nil, false
+}
+
+// AsSQLDWSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsSQLDWSink() (*SQLDWSink, bool) {
+ return nil, false
+}
+
+// AsSQLMISink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsSQLMISink() (*SQLMISink, bool) {
+ return nil, false
+}
+
+// AsAzureSQLSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsAzureSQLSink() (*AzureSQLSink, bool) {
+ return nil, false
+}
+
+// AsSQLServerSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsSQLServerSink() (*SQLServerSink, bool) {
+ return nil, false
+}
+
+// AsSQLSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsSQLSink() (*SQLSink, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsDocumentDbCollectionSink() (*DocumentDbCollectionSink, bool) {
+ return nil, false
+}
+
+// AsFileSystemSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsFileSystemSink() (*FileSystemSink, bool) {
+ return nil, false
+}
+
+// AsBlobSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsBlobSink() (*BlobSink, bool) {
+ return nil, false
+}
+
+// AsBinarySink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsBinarySink() (*BinarySink, bool) {
+ return nil, false
+}
+
+// AsParquetSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsParquetSink() (*ParquetSink, bool) {
+ return nil, false
+}
+
+// AsAvroSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
+// AsAzureTableSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsAzureTableSink() (*AzureTableSink, bool) {
+ return nil, false
+}
+
+// AsAzureQueueSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsAzureQueueSink() (*AzureQueueSink, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
+ return nil, false
+}
+
+// AsJSONSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsJSONSink() (*JSONSink, bool) {
+ return &js, true
+}
+
+// AsDelimitedTextSink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
+ return nil, false
+}
+
+// AsCopySink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsCopySink() (*CopySink, bool) {
+ return nil, false
+}
+
+// AsBasicCopySink is the BasicCopySink implementation for JSONSink.
+func (js JSONSink) AsBasicCopySink() (BasicCopySink, bool) {
+ return &js, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for JSONSink struct.
+func (js *JSONSink) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "storeSettings":
+ if v != nil {
+ storeSettings, err := unmarshalBasicStoreWriteSettings(*v)
+ if err != nil {
+ return err
+ }
+ js.StoreSettings = storeSettings
+ }
+ case "formatSettings":
+ if v != nil {
+ var formatSettings JSONWriteSettings
+ err = json.Unmarshal(*v, &formatSettings)
+ if err != nil {
+ return err
+ }
+ js.FormatSettings = &formatSettings
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if js.AdditionalProperties == nil {
+ js.AdditionalProperties = make(map[string]interface{})
+ }
+ js.AdditionalProperties[k] = additionalProperties
+ }
+ case "writeBatchSize":
+ if v != nil {
+ var writeBatchSize interface{}
+ err = json.Unmarshal(*v, &writeBatchSize)
+ if err != nil {
+ return err
+ }
+ js.WriteBatchSize = writeBatchSize
+ }
+ case "writeBatchTimeout":
+ if v != nil {
+ var writeBatchTimeout interface{}
+ err = json.Unmarshal(*v, &writeBatchTimeout)
+ if err != nil {
+ return err
+ }
+ js.WriteBatchTimeout = writeBatchTimeout
+ }
+ case "sinkRetryCount":
+ if v != nil {
+ var sinkRetryCount interface{}
+ err = json.Unmarshal(*v, &sinkRetryCount)
+ if err != nil {
+ return err
+ }
+ js.SinkRetryCount = sinkRetryCount
+ }
+ case "sinkRetryWait":
+ if v != nil {
+ var sinkRetryWait interface{}
+ err = json.Unmarshal(*v, &sinkRetryWait)
+ if err != nil {
+ return err
+ }
+ js.SinkRetryWait = sinkRetryWait
+ }
+ case "maxConcurrentConnections":
+ if v != nil {
+ var maxConcurrentConnections interface{}
+ err = json.Unmarshal(*v, &maxConcurrentConnections)
+ if err != nil {
+ return err
+ }
+ js.MaxConcurrentConnections = maxConcurrentConnections
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicCopySink
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ js.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
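// Illustrative sketch, not part of the generated diff: the UnmarshalJSON above
// routes unknown keys into AdditionalProperties, so service-side fields the SDK
// does not model survive decoding. The sample payload is an assumption; the
// snippet presumes the same encoding/json and datafactory imports as above.
func decodeJSONSink(raw []byte) (datafactory.JSONSink, error) {
	var sink datafactory.JSONSink
	if err := json.Unmarshal(raw, &sink); err != nil {
		return datafactory.JSONSink{}, err
	}
	// For raw = []byte(`{"writeBatchSize":100,"x-custom":"kept"}`):
	//   sink.WriteBatchSize                    == float64(100)
	//   sink.AdditionalProperties["x-custom"]  == "kept"
	return sink, nil
}
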
+// JSONSource a copy activity Json source.
+type JSONSource struct {
+ // StoreSettings - Json store settings.
+ StoreSettings *StoreReadSettings `json:"storeSettings,omitempty"`
+	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // SourceRetryCount - Source retry count. Type: integer (or Expression with resultType integer).
+ SourceRetryCount interface{} `json:"sourceRetryCount,omitempty"`
+ // SourceRetryWait - Source retry wait. Type: string (or Expression with resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
+ // MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
+ MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
+ Type TypeBasicCopySource `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for JSONSource.
+func (js JSONSource) MarshalJSON() ([]byte, error) {
+ js.Type = TypeJSONSource
+ objectMap := make(map[string]interface{})
+ if js.StoreSettings != nil {
+ objectMap["storeSettings"] = js.StoreSettings
+ }
+ if js.SourceRetryCount != nil {
+ objectMap["sourceRetryCount"] = js.SourceRetryCount
+ }
+ if js.SourceRetryWait != nil {
+ objectMap["sourceRetryWait"] = js.SourceRetryWait
+ }
+ if js.MaxConcurrentConnections != nil {
+ objectMap["maxConcurrentConnections"] = js.MaxConcurrentConnections
+ }
+ if js.Type != "" {
+ objectMap["type"] = js.Type
+ }
+ for k, v := range js.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAmazonRedshiftSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsAmazonRedshiftSource() (*AmazonRedshiftSource, bool) {
+ return nil, false
+}
+
+// AsGoogleAdWordsSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsGoogleAdWordsSource() (*GoogleAdWordsSource, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsOracleServiceCloudSource() (*OracleServiceCloudSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsDynamicsAXSource() (*DynamicsAXSource, bool) {
+ return nil, false
+}
+
+// AsResponsysSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsResponsysSource() (*ResponsysSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSalesforceMarketingCloudSource() (*SalesforceMarketingCloudSource, bool) {
+ return nil, false
+}
+
+// AsVerticaSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsVerticaSource() (*VerticaSource, bool) {
+ return nil, false
+}
+
+// AsNetezzaSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsNetezzaSource() (*NetezzaSource, bool) {
+ return nil, false
+}
+
+// AsZohoSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsZohoSource() (*ZohoSource, bool) {
+ return nil, false
+}
+
+// AsXeroSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsXeroSource() (*XeroSource, bool) {
+ return nil, false
+}
+
+// AsSquareSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSquareSource() (*SquareSource, bool) {
+ return nil, false
+}
+
+// AsSparkSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSparkSource() (*SparkSource, bool) {
+ return nil, false
+}
+
+// AsShopifySource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsShopifySource() (*ShopifySource, bool) {
+ return nil, false
+}
+
+// AsServiceNowSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsServiceNowSource() (*ServiceNowSource, bool) {
+ return nil, false
+}
+
+// AsQuickBooksSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsQuickBooksSource() (*QuickBooksSource, bool) {
+ return nil, false
+}
+
+// AsPrestoSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsPrestoSource() (*PrestoSource, bool) {
+ return nil, false
+}
+
+// AsPhoenixSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsPhoenixSource() (*PhoenixSource, bool) {
+ return nil, false
+}
+
+// AsPaypalSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsPaypalSource() (*PaypalSource, bool) {
+ return nil, false
+}
+
+// AsMarketoSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsMarketoSource() (*MarketoSource, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
+// AsMariaDBSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsMariaDBSource() (*MariaDBSource, bool) {
+ return nil, false
+}
+
+// AsMagentoSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsMagentoSource() (*MagentoSource, bool) {
+ return nil, false
+}
+
+// AsJiraSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsJiraSource() (*JiraSource, bool) {
+ return nil, false
+}
+
+// AsImpalaSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsImpalaSource() (*ImpalaSource, bool) {
+ return nil, false
+}
+
+// AsHubspotSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsHubspotSource() (*HubspotSource, bool) {
+ return nil, false
+}
+
+// AsHiveSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsHiveSource() (*HiveSource, bool) {
+ return nil, false
+}
+
+// AsHBaseSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsHBaseSource() (*HBaseSource, bool) {
+ return nil, false
+}
+
+// AsGreenplumSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsGreenplumSource() (*GreenplumSource, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQuerySource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsGoogleBigQuerySource() (*GoogleBigQuerySource, bool) {
+ return nil, false
+}
+
+// AsEloquaSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsEloquaSource() (*EloquaSource, bool) {
+ return nil, false
+}
+
+// AsDrillSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsDrillSource() (*DrillSource, bool) {
+ return nil, false
+}
+
+// AsCouchbaseSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsCouchbaseSource() (*CouchbaseSource, bool) {
+ return nil, false
+}
+
+// AsConcurSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsConcurSource() (*ConcurSource, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsAzurePostgreSQLSource() (*AzurePostgreSQLSource, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsAmazonMWSSource() (*AmazonMWSSource, bool) {
+ return nil, false
+}
+
+// AsHTTPSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsHTTPSource() (*HTTPSource, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsAzureBlobFSSource() (*AzureBlobFSSource, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsAzureDataLakeStoreSource() (*AzureDataLakeStoreSource, bool) {
+ return nil, false
+}
+
+// AsOffice365Source is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsOffice365Source() (*Office365Source, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPISource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsCosmosDbMongoDbAPISource() (*CosmosDbMongoDbAPISource, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2Source is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsMongoDbV2Source() (*MongoDbV2Source, bool) {
+ return nil, false
+}
+
+// AsMongoDbSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsMongoDbSource() (*MongoDbSource, bool) {
+ return nil, false
+}
+
+// AsCassandraSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsCassandraSource() (*CassandraSource, bool) {
+ return nil, false
+}
+
+// AsWebSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsWebSource() (*WebSource, bool) {
+ return nil, false
+}
+
+// AsTeradataSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsTeradataSource() (*TeradataSource, bool) {
+ return nil, false
+}
+
+// AsOracleSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsOracleSource() (*OracleSource, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsAzureDataExplorerSource() (*AzureDataExplorerSource, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsAzureMySQLSource() (*AzureMySQLSource, bool) {
+ return nil, false
+}
+
+// AsHdfsSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsHdfsSource() (*HdfsSource, bool) {
+ return nil, false
+}
+
+// AsFileSystemSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsFileSystemSource() (*FileSystemSource, bool) {
+ return nil, false
+}
+
+// AsSQLDWSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSQLDWSource() (*SQLDWSource, bool) {
+ return nil, false
+}
+
+// AsSQLMISource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSQLMISource() (*SQLMISource, bool) {
+ return nil, false
+}
+
+// AsAzureSQLSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsAzureSQLSource() (*AzureSQLSource, bool) {
+ return nil, false
+}
+
+// AsSQLServerSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSQLServerSource() (*SQLServerSource, bool) {
+ return nil, false
+}
+
+// AsSQLSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSQLSource() (*SQLSource, bool) {
+ return nil, false
+}
+
+// AsRestSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsRestSource() (*RestSource, bool) {
+ return nil, false
+}
+
+// AsSapTableSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSapTableSource() (*SapTableSource, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSapOpenHubSource() (*SapOpenHubSource, bool) {
+ return nil, false
+}
+
+// AsSapHanaSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSapHanaSource() (*SapHanaSource, bool) {
+ return nil, false
+}
+
+// AsSapEccSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSapEccSource() (*SapEccSource, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSapCloudForCustomerSource() (*SapCloudForCustomerSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSalesforceServiceCloudSource() (*SalesforceServiceCloudSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSalesforceSource() (*SalesforceSource, bool) {
+ return nil, false
+}
+
+// AsODataSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsODataSource() (*ODataSource, bool) {
+ return nil, false
+}
+
+// AsSapBwSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
+// AsSybaseSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsSybaseSource() (*SybaseSource, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsPostgreSQLSource() (*PostgreSQLSource, bool) {
+ return nil, false
+}
+
+// AsMySQLSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsMySQLSource() (*MySQLSource, bool) {
+ return nil, false
+}
+
+// AsOdbcSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsOdbcSource() (*OdbcSource, bool) {
+ return nil, false
+}
+
+// AsDb2Source is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsDb2Source() (*Db2Source, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsMicrosoftAccessSource() (*MicrosoftAccessSource, bool) {
+ return nil, false
+}
+
+// AsInformixSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsInformixSource() (*InformixSource, bool) {
+ return nil, false
+}
+
+// AsRelationalSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsRelationalSource() (*RelationalSource, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsCommonDataServiceForAppsSource() (*CommonDataServiceForAppsSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsDynamicsCrmSource() (*DynamicsCrmSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsDynamicsSource() (*DynamicsSource, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsDocumentDbCollectionSource() (*DocumentDbCollectionSource, bool) {
+ return nil, false
+}
+
+// AsBlobSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsBlobSource() (*BlobSource, bool) {
+ return nil, false
+}
+
+// AsAzureTableSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsAzureTableSource() (*AzureTableSource, bool) {
+ return nil, false
+}
+
+// AsBinarySource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsBinarySource() (*BinarySource, bool) {
+ return nil, false
+}
+
+// AsJSONSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsJSONSource() (*JSONSource, bool) {
+ return &js, true
+}
+
+// AsDelimitedTextSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
+ return nil, false
+}
+
+// AsParquetSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsParquetSource() (*ParquetSource, bool) {
+ return nil, false
+}
+
+// AsAvroSource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
+// AsCopySource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsCopySource() (*CopySource, bool) {
+ return nil, false
+}
+
+// AsBasicCopySource is the BasicCopySource implementation for JSONSource.
+func (js JSONSource) AsBasicCopySource() (BasicCopySource, bool) {
+ return &js, true
+}
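// Illustrative sketch, not part of the generated diff: the As* accessors give a checked
// down-cast from the BasicCopySource interface without writing a type switch. Written as if
// inside this package, and it assumes the BasicCopySource interface (like the concrete
// implementations shown here) exposes AsJSONSource.
func exampleAsJSONSource(cs BasicCopySource) {
	if js, ok := cs.AsJSONSource(); ok {
		// cs wraps a JSONSource; js is the concrete *JSONSource with its fields populated.
		_ = js.StoreSettings
	}
	// Every other As* accessor on a JSONSource returns (nil, false), while
	// AsBasicCopySource returns the value re-wrapped as the interface type.
}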
+
+// UnmarshalJSON is the custom unmarshaler for JSONSource struct.
+func (js *JSONSource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "storeSettings":
+ if v != nil {
+ var storeSettings StoreReadSettings
+ err = json.Unmarshal(*v, &storeSettings)
+ if err != nil {
+ return err
+ }
+ js.StoreSettings = &storeSettings
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if js.AdditionalProperties == nil {
+ js.AdditionalProperties = make(map[string]interface{})
+ }
+ js.AdditionalProperties[k] = additionalProperties
+ }
+ case "sourceRetryCount":
+ if v != nil {
+ var sourceRetryCount interface{}
+ err = json.Unmarshal(*v, &sourceRetryCount)
+ if err != nil {
+ return err
+ }
+ js.SourceRetryCount = sourceRetryCount
+ }
+ case "sourceRetryWait":
+ if v != nil {
+ var sourceRetryWait interface{}
+ err = json.Unmarshal(*v, &sourceRetryWait)
+ if err != nil {
+ return err
+ }
+ js.SourceRetryWait = sourceRetryWait
+ }
+ case "maxConcurrentConnections":
+ if v != nil {
+ var maxConcurrentConnections interface{}
+ err = json.Unmarshal(*v, &maxConcurrentConnections)
+ if err != nil {
+ return err
+ }
+ js.MaxConcurrentConnections = maxConcurrentConnections
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicCopySource
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ js.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
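// Illustrative sketch, not part of the generated diff: the unmarshaler above routes known
// keys into typed fields and collects every unrecognized key into AdditionalProperties.
// Written as if inside this package; the payload is a made-up minimal example.
func exampleUnmarshalJSONSource() (*JSONSource, error) {
	raw := []byte(`{
		"type": "JsonSource",
		"maxConcurrentConnections": 8,
		"customKey": "kept verbatim"
	}`)
	var js JSONSource
	if err := json.Unmarshal(raw, &js); err != nil {
		return nil, err
	}
	// js.MaxConcurrentConnections holds float64(8) (JSON numbers land in interface{} as float64);
	// js.AdditionalProperties["customKey"] holds "kept verbatim".
	return &js, nil
}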
+
+// JSONWriteSettings JSON write settings.
+type JSONWriteSettings struct {
+ // FilePattern - File pattern of JSON. This setting controls the way a collection of JSON objects will be treated. The default value is 'setOfObjects'. It is case-sensitive. Possible values include: 'SetOfObjects', 'ArrayOfObjects'
+ FilePattern JSONWriteFilePattern `json:"filePattern,omitempty"`
+	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Type - The write setting type.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for JSONWriteSettings.
+func (jws JSONWriteSettings) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if jws.FilePattern != "" {
+ objectMap["filePattern"] = jws.FilePattern
+ }
+ if jws.Type != nil {
+ objectMap["type"] = jws.Type
+ }
+ for k, v := range jws.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for JSONWriteSettings struct.
+func (jws *JSONWriteSettings) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "filePattern":
+ if v != nil {
+ var filePattern JSONWriteFilePattern
+ err = json.Unmarshal(*v, &filePattern)
+ if err != nil {
+ return err
+ }
+ jws.FilePattern = filePattern
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if jws.AdditionalProperties == nil {
+ jws.AdditionalProperties = make(map[string]interface{})
+ }
+ jws.AdditionalProperties[k] = additionalProperties
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ jws.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
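// Illustrative sketch, not part of the generated diff: JSONWriteSettings round-trips the
// filePattern enum plus any unmodeled keys. Written as if inside this package; SetOfObjects
// is the JSONWriteFilePattern constant declared elsewhere in this file, and the write
// setting type string below is an assumed wire value, not something this diff confirms.
func exampleMarshalJSONWriteSettings() ([]byte, error) {
	settingType := "JsonWriteSettings" // assumption for illustration only
	jws := JSONWriteSettings{
		FilePattern: SetOfObjects,
		Type:        &settingType,
	}
	// e.g. {"filePattern":"setOfObjects","type":"JsonWriteSettings"}
	return json.Marshal(jws)
}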
+
// LinkedIntegrationRuntime the linked integration runtime information.
type LinkedIntegrationRuntime struct {
// Name - READ-ONLY; The name of the linked integration runtime.
@@ -101629,6 +111100,7 @@ type BasicLinkedService interface {
AsPhoenixLinkedService() (*PhoenixLinkedService, bool)
AsPaypalLinkedService() (*PaypalLinkedService, bool)
AsMarketoLinkedService() (*MarketoLinkedService, bool)
+ AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool)
AsMariaDBLinkedService() (*MariaDBLinkedService, bool)
AsMagentoLinkedService() (*MagentoLinkedService, bool)
AsJiraLinkedService() (*JiraLinkedService, bool)
@@ -101711,7 +111183,7 @@ type LinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -101819,6 +111291,10 @@ func unmarshalBasicLinkedService(body []byte) (BasicLinkedService, error) {
var mls MarketoLinkedService
err := json.Unmarshal(body, &mls)
return mls, err
+ case string(TypeAzureMariaDB):
+ var amdls AzureMariaDBLinkedService
+ err := json.Unmarshal(body, &amdls)
+ return amdls, err
case string(TypeMariaDB):
var mdls MariaDBLinkedService
err := json.Unmarshal(body, &mdls)
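// Illustrative sketch, not part of the generated diff: the factory above keys off the
// "type" discriminator, so a payload tagged AzureMariaDB now comes back as an
// AzureMariaDBLinkedService. unmarshalBasicLinkedService is unexported, so this would only
// compile inside this package, and the "AzureMariaDB" wire value is an assumption taken
// from the TypeAzureMariaDB case shown here.
func exampleDispatchAzureMariaDB() (*AzureMariaDBLinkedService, error) {
	body := []byte(`{"type":"AzureMariaDB"}`)
	ls, err := unmarshalBasicLinkedService(body)
	if err != nil {
		return nil, err
	}
	// The As* accessor is a checked conversion; ok would be false for any other linked service type.
	amdls, ok := ls.AsAzureMariaDBLinkedService()
	if !ok {
		return nil, nil
	}
	return amdls, nil
}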
@@ -102253,6 +111729,11 @@ func (ls LinkedService) AsMarketoLinkedService() (*MarketoLinkedService, bool) {
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for LinkedService.
+func (ls LinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for LinkedService.
func (ls LinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -103385,7 +112866,7 @@ type MagentoLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -103537,6 +113018,11 @@ func (mls MagentoLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for MagentoLinkedService.
+func (mls MagentoLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for MagentoLinkedService.
func (mls MagentoLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -104062,7 +113548,7 @@ type MagentoObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -104198,6 +113684,11 @@ func (mod MagentoObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for MagentoObjectDataset.
+func (mod MagentoObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for MagentoObjectDataset.
func (mod MagentoObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -104323,6 +113814,16 @@ func (mod MagentoObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapClou
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for MagentoObjectDataset.
+func (mod MagentoObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for MagentoObjectDataset.
+func (mod MagentoObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for MagentoObjectDataset.
func (mod MagentoObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -104363,6 +113864,16 @@ func (mod MagentoObjectDataset) AsRelationalTableDataset() (*RelationalTableData
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for MagentoObjectDataset.
+func (mod MagentoObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for MagentoObjectDataset.
+func (mod MagentoObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for MagentoObjectDataset.
func (mod MagentoObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -104478,6 +113989,11 @@ func (mod MagentoObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for MagentoObjectDataset.
+func (mod MagentoObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for MagentoObjectDataset.
func (mod MagentoObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -104488,6 +114004,11 @@ func (mod MagentoObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for MagentoObjectDataset.
+func (mod MagentoObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for MagentoObjectDataset.
func (mod MagentoObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -104623,7 +114144,7 @@ type MagentoSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -104747,6 +114268,11 @@ func (ms MagentoSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for MagentoSource.
+func (ms MagentoSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for MagentoSource.
func (ms MagentoSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -104967,6 +114493,11 @@ func (ms MagentoSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for MagentoSource.
+func (ms MagentoSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for MagentoSource.
func (ms MagentoSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -105042,6 +114573,11 @@ func (ms MagentoSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for MagentoSource.
+func (ms MagentoSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for MagentoSource.
func (ms MagentoSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -105052,6 +114588,11 @@ func (ms MagentoSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for MagentoSource.
+func (ms MagentoSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for MagentoSource.
func (ms MagentoSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -105436,7 +114977,7 @@ type MariaDBLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -105588,6 +115129,11 @@ func (mdls MariaDBLinkedService) AsMarketoLinkedService() (*MarketoLinkedService
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for MariaDBLinkedService.
+func (mdls MariaDBLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for MariaDBLinkedService.
func (mdls MariaDBLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return &mdls, true
@@ -106031,7 +115577,7 @@ type MariaDBSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -106155,6 +115701,11 @@ func (mds MariaDBSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for MariaDBSource.
+func (mds MariaDBSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for MariaDBSource.
func (mds MariaDBSource) AsMariaDBSource() (*MariaDBSource, bool) {
return &mds, true
@@ -106375,6 +115926,11 @@ func (mds MariaDBSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for MariaDBSource.
+func (mds MariaDBSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for MariaDBSource.
func (mds MariaDBSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -106450,6 +116006,11 @@ func (mds MariaDBSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for MariaDBSource.
+func (mds MariaDBSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for MariaDBSource.
func (mds MariaDBSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -106460,6 +116021,11 @@ func (mds MariaDBSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for MariaDBSource.
+func (mds MariaDBSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for MariaDBSource.
func (mds MariaDBSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -106562,7 +116128,7 @@ type MariaDBTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -106698,6 +116264,11 @@ func (mdtd MariaDBTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for MariaDBTableDataset.
+func (mdtd MariaDBTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for MariaDBTableDataset.
func (mdtd MariaDBTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return &mdtd, true
@@ -106823,6 +116394,16 @@ func (mdtd MariaDBTableDataset) AsSapCloudForCustomerResourceDataset() (*SapClou
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for MariaDBTableDataset.
+func (mdtd MariaDBTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for MariaDBTableDataset.
+func (mdtd MariaDBTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for MariaDBTableDataset.
func (mdtd MariaDBTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -106863,6 +116444,16 @@ func (mdtd MariaDBTableDataset) AsRelationalTableDataset() (*RelationalTableData
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for MariaDBTableDataset.
+func (mdtd MariaDBTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for MariaDBTableDataset.
+func (mdtd MariaDBTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for MariaDBTableDataset.
func (mdtd MariaDBTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -106978,6 +116569,11 @@ func (mdtd MariaDBTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for MariaDBTableDataset.
+func (mdtd MariaDBTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for MariaDBTableDataset.
func (mdtd MariaDBTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -106988,6 +116584,11 @@ func (mdtd MariaDBTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for MariaDBTableDataset.
+func (mdtd MariaDBTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for MariaDBTableDataset.
func (mdtd MariaDBTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -107125,7 +116726,7 @@ type MarketoLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -107277,6 +116878,11 @@ func (mls MarketoLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return &mls, true
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for MarketoLinkedService.
+func (mls MarketoLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for MarketoLinkedService.
func (mls MarketoLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -107813,7 +117419,7 @@ type MarketoObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -107949,6 +117555,11 @@ func (mod MarketoObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return &mod, true
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for MarketoObjectDataset.
+func (mod MarketoObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for MarketoObjectDataset.
func (mod MarketoObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -108074,6 +117685,16 @@ func (mod MarketoObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapClou
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for MarketoObjectDataset.
+func (mod MarketoObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for MarketoObjectDataset.
+func (mod MarketoObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for MarketoObjectDataset.
func (mod MarketoObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -108114,6 +117735,16 @@ func (mod MarketoObjectDataset) AsRelationalTableDataset() (*RelationalTableData
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for MarketoObjectDataset.
+func (mod MarketoObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for MarketoObjectDataset.
+func (mod MarketoObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for MarketoObjectDataset.
func (mod MarketoObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -108229,6 +117860,11 @@ func (mod MarketoObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for MarketoObjectDataset.
+func (mod MarketoObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for MarketoObjectDataset.
func (mod MarketoObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -108239,6 +117875,11 @@ func (mod MarketoObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for MarketoObjectDataset.
+func (mod MarketoObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for MarketoObjectDataset.
func (mod MarketoObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -108374,7 +118015,7 @@ type MarketoSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -108498,6 +118139,11 @@ func (ms MarketoSource) AsMarketoSource() (*MarketoSource, bool) {
return &ms, true
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for MarketoSource.
+func (ms MarketoSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for MarketoSource.
func (ms MarketoSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -108718,6 +118364,11 @@ func (ms MarketoSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for MarketoSource.
+func (ms MarketoSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for MarketoSource.
func (ms MarketoSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -108793,6 +118444,11 @@ func (ms MarketoSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for MarketoSource.
+func (ms MarketoSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for MarketoSource.
func (ms MarketoSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -108803,6 +118459,11 @@ func (ms MarketoSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for MarketoSource.
+func (ms MarketoSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for MarketoSource.
func (ms MarketoSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -108899,7 +118560,7 @@ type MicrosoftAccessLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -109051,6 +118712,11 @@ func (mals MicrosoftAccessLinkedService) AsMarketoLinkedService() (*MarketoLinke
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for MicrosoftAccessLinkedService.
+func (mals MicrosoftAccessLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for MicrosoftAccessLinkedService.
func (mals MicrosoftAccessLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -109571,7 +119237,7 @@ type MicrosoftAccessSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -109726,6 +119392,11 @@ func (mas MicrosoftAccessSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for MicrosoftAccessSink.
+func (mas MicrosoftAccessSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for MicrosoftAccessSink.
func (mas MicrosoftAccessSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -109741,11 +119412,21 @@ func (mas MicrosoftAccessSink) AsSapCloudForCustomerSink() (*SapCloudForCustomer
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for MicrosoftAccessSink.
+func (mas MicrosoftAccessSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for MicrosoftAccessSink.
func (mas MicrosoftAccessSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for MicrosoftAccessSink.
+func (mas MicrosoftAccessSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for MicrosoftAccessSink.
func (mas MicrosoftAccessSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -109863,7 +119544,7 @@ type MicrosoftAccessSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -109987,6 +119668,11 @@ func (mas MicrosoftAccessSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for MicrosoftAccessSource.
+func (mas MicrosoftAccessSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for MicrosoftAccessSource.
func (mas MicrosoftAccessSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -110207,6 +119893,11 @@ func (mas MicrosoftAccessSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for MicrosoftAccessSource.
+func (mas MicrosoftAccessSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for MicrosoftAccessSource.
func (mas MicrosoftAccessSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -110282,6 +119973,11 @@ func (mas MicrosoftAccessSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for MicrosoftAccessSource.
+func (mas MicrosoftAccessSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for MicrosoftAccessSource.
func (mas MicrosoftAccessSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -110292,6 +119988,11 @@ func (mas MicrosoftAccessSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for MicrosoftAccessSource.
+func (mas MicrosoftAccessSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for MicrosoftAccessSource.
func (mas MicrosoftAccessSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -110394,7 +120095,7 @@ type MicrosoftAccessTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -110530,6 +120231,11 @@ func (matd MicrosoftAccessTableDataset) AsMarketoObjectDataset() (*MarketoObject
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
+func (matd MicrosoftAccessTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
func (matd MicrosoftAccessTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -110655,6 +120361,16 @@ func (matd MicrosoftAccessTableDataset) AsSapCloudForCustomerResourceDataset() (
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
+func (matd MicrosoftAccessTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
+func (matd MicrosoftAccessTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
func (matd MicrosoftAccessTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -110695,6 +120411,16 @@ func (matd MicrosoftAccessTableDataset) AsRelationalTableDataset() (*RelationalT
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
+func (matd MicrosoftAccessTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
+func (matd MicrosoftAccessTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
func (matd MicrosoftAccessTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -110810,6 +120536,11 @@ func (matd MicrosoftAccessTableDataset) AsBinaryDataset() (*BinaryDataset, bool)
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
+func (matd MicrosoftAccessTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
func (matd MicrosoftAccessTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -110820,6 +120551,11 @@ func (matd MicrosoftAccessTableDataset) AsParquetDataset() (*ParquetDataset, boo
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
+func (matd MicrosoftAccessTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for MicrosoftAccessTableDataset.
func (matd MicrosoftAccessTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -110969,7 +120705,7 @@ type MongoDbCollectionDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -111105,6 +120841,11 @@ func (mdcd MongoDbCollectionDataset) AsMarketoObjectDataset() (*MarketoObjectDat
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for MongoDbCollectionDataset.
+func (mdcd MongoDbCollectionDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for MongoDbCollectionDataset.
func (mdcd MongoDbCollectionDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -111230,6 +120971,16 @@ func (mdcd MongoDbCollectionDataset) AsSapCloudForCustomerResourceDataset() (*Sa
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for MongoDbCollectionDataset.
+func (mdcd MongoDbCollectionDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for MongoDbCollectionDataset.
+func (mdcd MongoDbCollectionDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for MongoDbCollectionDataset.
func (mdcd MongoDbCollectionDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -111270,6 +121021,16 @@ func (mdcd MongoDbCollectionDataset) AsRelationalTableDataset() (*RelationalTabl
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for MongoDbCollectionDataset.
+func (mdcd MongoDbCollectionDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for MongoDbCollectionDataset.
+func (mdcd MongoDbCollectionDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for MongoDbCollectionDataset.
func (mdcd MongoDbCollectionDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -111385,6 +121146,11 @@ func (mdcd MongoDbCollectionDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for MongoDbCollectionDataset.
+func (mdcd MongoDbCollectionDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for MongoDbCollectionDataset.
func (mdcd MongoDbCollectionDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -111395,6 +121161,11 @@ func (mdcd MongoDbCollectionDataset) AsParquetDataset() (*ParquetDataset, bool)
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for MongoDbCollectionDataset.
+func (mdcd MongoDbCollectionDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for MongoDbCollectionDataset.
func (mdcd MongoDbCollectionDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -111636,7 +121407,7 @@ type MongoDbLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -111788,6 +121559,11 @@ func (mdls MongoDbLinkedService) AsMarketoLinkedService() (*MarketoLinkedService
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for MongoDbLinkedService.
+func (mdls MongoDbLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for MongoDbLinkedService.
func (mdls MongoDbLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -112349,7 +122125,7 @@ type MongoDbSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -112473,6 +122249,11 @@ func (mds MongoDbSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for MongoDbSource.
+func (mds MongoDbSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for MongoDbSource.
func (mds MongoDbSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -112693,6 +122474,11 @@ func (mds MongoDbSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for MongoDbSource.
+func (mds MongoDbSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for MongoDbSource.
func (mds MongoDbSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -112768,6 +122554,11 @@ func (mds MongoDbSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for MongoDbSource.
+func (mds MongoDbSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for MongoDbSource.
func (mds MongoDbSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -112778,6 +122569,11 @@ func (mds MongoDbSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for MongoDbSource.
+func (mds MongoDbSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for MongoDbSource.
func (mds MongoDbSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -112880,7 +122676,7 @@ type MongoDbV2CollectionDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
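
The new discriminator values listed above ('TypeAzureMariaDBTable', 'TypeSapBwCube', 'TypeDb2Table', 'TypeJSON', 'TypeAvro', and so on) each come with a matching As* conversion helper on every BasicDataset implementation, as the hunks below show for MongoDbV2CollectionDataset. A minimal consumer-side sketch, assuming the 2018-06-01 datafactory import path and assuming the BasicDataset interface declares the same As* helpers these implementations satisfy:

```go
package datasets

import "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"

// describe inspects a BasicDataset with the generated As* helpers; each helper
// returns (nil, false) unless ds is that concrete type, so the checks fall
// through harmlessly.
func describe(ds datafactory.BasicDataset) string {
	if _, ok := ds.AsAzureMariaDBTableDataset(); ok {
		return "Azure Database for MariaDB table"
	}
	if _, ok := ds.AsJSONDataset(); ok {
		return "JSON"
	}
	if _, ok := ds.AsAvroDataset(); ok {
		return "Avro"
	}
	return "some other dataset shape"
}
```
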
@@ -113016,6 +122812,11 @@ func (mdvcd MongoDbV2CollectionDataset) AsMarketoObjectDataset() (*MarketoObject
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
+func (mdvcd MongoDbV2CollectionDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
func (mdvcd MongoDbV2CollectionDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -113141,6 +122942,16 @@ func (mdvcd MongoDbV2CollectionDataset) AsSapCloudForCustomerResourceDataset() (
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
+func (mdvcd MongoDbV2CollectionDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
+func (mdvcd MongoDbV2CollectionDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
func (mdvcd MongoDbV2CollectionDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -113181,6 +122992,16 @@ func (mdvcd MongoDbV2CollectionDataset) AsRelationalTableDataset() (*RelationalT
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
+func (mdvcd MongoDbV2CollectionDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
+func (mdvcd MongoDbV2CollectionDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
func (mdvcd MongoDbV2CollectionDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -113296,6 +123117,11 @@ func (mdvcd MongoDbV2CollectionDataset) AsBinaryDataset() (*BinaryDataset, bool)
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
+func (mdvcd MongoDbV2CollectionDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
func (mdvcd MongoDbV2CollectionDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -113306,6 +123132,11 @@ func (mdvcd MongoDbV2CollectionDataset) AsParquetDataset() (*ParquetDataset, boo
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
+func (mdvcd MongoDbV2CollectionDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for MongoDbV2CollectionDataset.
func (mdvcd MongoDbV2CollectionDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -113449,7 +123280,7 @@ type MongoDbV2LinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -113601,6 +123432,11 @@ func (mdvls MongoDbV2LinkedService) AsMarketoLinkedService() (*MarketoLinkedServ
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for MongoDbV2LinkedService.
+func (mdvls MongoDbV2LinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for MongoDbV2LinkedService.
func (mdvls MongoDbV2LinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -114046,7 +123882,7 @@ type MongoDbV2Source struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
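
On the copy-source side the discriminator gains 'TypeAzureMariaDBSource', 'TypeSapBwSource', 'TypeJSONSource' and 'TypeAvroSource', and each BasicCopySource implementation (MongoDbV2Source here, MySQLSource and NetezzaSource further down) picks up the matching helpers. A hedged sketch of detecting the new shapes on a source taken from a copy activity, under the same import-path assumption as the previous sketch:

```go
package datasets

import "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"

// sourceFormat reports which of the newly added copy-source shapes src is;
// every As* helper returns (nil, false) for other concrete types.
func sourceFormat(src datafactory.BasicCopySource) string {
	if _, ok := src.AsJSONSource(); ok {
		return "json"
	}
	if _, ok := src.AsAvroSource(); ok {
		return "avro"
	}
	if _, ok := src.AsSapBwSource(); ok {
		return "sap-bw"
	}
	if _, ok := src.AsAzureMariaDBSource(); ok {
		return "azure-mariadb"
	}
	return "other"
}
```
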
@@ -114176,6 +124012,11 @@ func (mdvs MongoDbV2Source) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for MongoDbV2Source.
+func (mdvs MongoDbV2Source) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for MongoDbV2Source.
func (mdvs MongoDbV2Source) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -114396,6 +124237,11 @@ func (mdvs MongoDbV2Source) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for MongoDbV2Source.
+func (mdvs MongoDbV2Source) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for MongoDbV2Source.
func (mdvs MongoDbV2Source) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -114471,6 +124317,11 @@ func (mdvs MongoDbV2Source) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for MongoDbV2Source.
+func (mdvs MongoDbV2Source) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for MongoDbV2Source.
func (mdvs MongoDbV2Source) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -114481,6 +124332,11 @@ func (mdvs MongoDbV2Source) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for MongoDbV2Source.
+func (mdvs MongoDbV2Source) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for MongoDbV2Source.
func (mdvs MongoDbV2Source) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -114597,7 +124453,7 @@ type MultiplePipelineTrigger struct {
AdditionalProperties map[string]interface{} `json:""`
// Description - Trigger description.
Description *string `json:"description,omitempty"`
- // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'Started', 'Stopped', 'Disabled'
+ // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'TriggerRuntimeStateStarted', 'TriggerRuntimeStateStopped', 'TriggerRuntimeStateDisabled'
RuntimeState TriggerRuntimeState `json:"runtimeState,omitempty"`
// Annotations - List of tags that can be used for describing the trigger.
Annotations *[]interface{} `json:"annotations,omitempty"`
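
The RuntimeState doc comment now lists the prefixed names, reflecting the rename of the TriggerRuntimeState values in this release. A short migration sketch, assuming the constants are exported exactly under the names shown in the comment above:

```go
package triggers

import "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"

// isRunning compares against the renamed constant; before this release the
// same check would have used the unprefixed Started value.
func isRunning(t datafactory.MultiplePipelineTrigger) bool {
	return t.RuntimeState == datafactory.TriggerRuntimeStateStarted
}
```
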
@@ -114803,7 +124659,7 @@ type MySQLLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -114955,6 +124811,11 @@ func (msls MySQLLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for MySQLLinkedService.
+func (msls MySQLLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for MySQLLinkedService.
func (msls MySQLLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -115439,7 +125300,7 @@ type MySQLSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -115563,6 +125424,11 @@ func (mss MySQLSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for MySQLSource.
+func (mss MySQLSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for MySQLSource.
func (mss MySQLSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -115783,6 +125649,11 @@ func (mss MySQLSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for MySQLSource.
+func (mss MySQLSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for MySQLSource.
func (mss MySQLSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -115858,6 +125729,11 @@ func (mss MySQLSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for MySQLSource.
+func (mss MySQLSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for MySQLSource.
func (mss MySQLSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -115868,6 +125744,11 @@ func (mss MySQLSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for MySQLSource.
+func (mss MySQLSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for MySQLSource.
func (mss MySQLSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -115970,7 +125851,7 @@ type MySQLTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -116106,6 +125987,11 @@ func (mstd MySQLTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, b
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for MySQLTableDataset.
+func (mstd MySQLTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for MySQLTableDataset.
func (mstd MySQLTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -116231,6 +126117,16 @@ func (mstd MySQLTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudF
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for MySQLTableDataset.
+func (mstd MySQLTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for MySQLTableDataset.
+func (mstd MySQLTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for MySQLTableDataset.
func (mstd MySQLTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -116271,6 +126167,16 @@ func (mstd MySQLTableDataset) AsRelationalTableDataset() (*RelationalTableDatase
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for MySQLTableDataset.
+func (mstd MySQLTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for MySQLTableDataset.
+func (mstd MySQLTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for MySQLTableDataset.
func (mstd MySQLTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -116386,6 +126292,11 @@ func (mstd MySQLTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for MySQLTableDataset.
+func (mstd MySQLTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for MySQLTableDataset.
func (mstd MySQLTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -116396,6 +126307,11 @@ func (mstd MySQLTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for MySQLTableDataset.
+func (mstd MySQLTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for MySQLTableDataset.
func (mstd MySQLTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -116539,7 +126455,7 @@ type NetezzaLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -116691,6 +126607,11 @@ func (nls NetezzaLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for NetezzaLinkedService.
+func (nls NetezzaLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for NetezzaLinkedService.
func (nls NetezzaLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -117148,7 +127069,7 @@ type NetezzaSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -117278,6 +127199,11 @@ func (ns NetezzaSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for NetezzaSource.
+func (ns NetezzaSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for NetezzaSource.
func (ns NetezzaSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -117498,6 +127424,11 @@ func (ns NetezzaSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for NetezzaSource.
+func (ns NetezzaSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for NetezzaSource.
func (ns NetezzaSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -117573,6 +127504,11 @@ func (ns NetezzaSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for NetezzaSource.
+func (ns NetezzaSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for NetezzaSource.
func (ns NetezzaSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -117583,6 +127519,11 @@ func (ns NetezzaSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for NetezzaSource.
+func (ns NetezzaSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for NetezzaSource.
func (ns NetezzaSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -117685,8 +127626,8 @@ func (ns *NetezzaSource) UnmarshalJSON(body []byte) error {
// NetezzaTableDataset netezza dataset.
type NetezzaTableDataset struct {
- // GenericDatasetTypeProperties - Properties specific to this dataset type.
- *GenericDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // NetezzaTableDatasetTypeProperties - Properties specific to this dataset type.
+ *NetezzaTableDatasetTypeProperties `json:"typeProperties,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// Description - Dataset description.
@@ -117703,7 +127644,7 @@ type NetezzaTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -117711,8 +127652,8 @@ type NetezzaTableDataset struct {
func (ntd NetezzaTableDataset) MarshalJSON() ([]byte, error) {
ntd.Type = TypeNetezzaTable
objectMap := make(map[string]interface{})
- if ntd.GenericDatasetTypeProperties != nil {
- objectMap["typeProperties"] = ntd.GenericDatasetTypeProperties
+ if ntd.NetezzaTableDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = ntd.NetezzaTableDatasetTypeProperties
}
if ntd.Description != nil {
objectMap["description"] = ntd.Description
@@ -117839,6 +127780,11 @@ func (ntd NetezzaTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for NetezzaTableDataset.
+func (ntd NetezzaTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for NetezzaTableDataset.
func (ntd NetezzaTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -117964,6 +127910,16 @@ func (ntd NetezzaTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloud
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for NetezzaTableDataset.
+func (ntd NetezzaTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for NetezzaTableDataset.
+func (ntd NetezzaTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for NetezzaTableDataset.
func (ntd NetezzaTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -118004,6 +127960,16 @@ func (ntd NetezzaTableDataset) AsRelationalTableDataset() (*RelationalTableDatas
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for NetezzaTableDataset.
+func (ntd NetezzaTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for NetezzaTableDataset.
+func (ntd NetezzaTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for NetezzaTableDataset.
func (ntd NetezzaTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -118119,6 +128085,11 @@ func (ntd NetezzaTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for NetezzaTableDataset.
+func (ntd NetezzaTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for NetezzaTableDataset.
func (ntd NetezzaTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -118129,6 +128100,11 @@ func (ntd NetezzaTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for NetezzaTableDataset.
+func (ntd NetezzaTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for NetezzaTableDataset.
func (ntd NetezzaTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -118155,12 +128131,12 @@ func (ntd *NetezzaTableDataset) UnmarshalJSON(body []byte) error {
switch k {
case "typeProperties":
if v != nil {
- var genericDatasetTypeProperties GenericDatasetTypeProperties
- err = json.Unmarshal(*v, &genericDatasetTypeProperties)
+ var netezzaTableDatasetTypeProperties NetezzaTableDatasetTypeProperties
+ err = json.Unmarshal(*v, &netezzaTableDatasetTypeProperties)
if err != nil {
return err
}
- ntd.GenericDatasetTypeProperties = &genericDatasetTypeProperties
+ ntd.NetezzaTableDatasetTypeProperties = &netezzaTableDatasetTypeProperties
}
default:
if v != nil {
@@ -118252,6 +128228,16 @@ func (ntd *NetezzaTableDataset) UnmarshalJSON(body []byte) error {
return nil
}
+// NetezzaTableDatasetTypeProperties netezza dataset properties.
+type NetezzaTableDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Table - The table name of the Netezza database. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Schema - The schema name of the Netezza database. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+}
+
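
NetezzaTableDataset moves from the shared GenericDatasetTypeProperties to this dedicated type, which keeps the retiring tableName but adds separate schema and table properties. A hedged sketch of building and serializing a dataset with the new shape; the import path and the literal schema/table names are illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// The custom MarshalJSON above stamps the discriminator (TypeNetezzaTable)
	// and nests these properties under "typeProperties".
	ds := datafactory.NetezzaTableDataset{
		NetezzaTableDatasetTypeProperties: &datafactory.NetezzaTableDatasetTypeProperties{
			Schema: "ADMIN",  // illustrative schema name
			Table:  "ORDERS", // illustrative table name
		},
	}
	payload, err := json.Marshal(ds)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(payload))
}
```
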
// ODataLinkedService open Data Protocol (OData) linked service.
type ODataLinkedService struct {
// ODataLinkedServiceTypeProperties - OData linked service properties.
@@ -118266,7 +128252,7 @@ type ODataLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -118418,6 +128404,11 @@ func (odls ODataLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for ODataLinkedService.
+func (odls ODataLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for ODataLinkedService.
func (odls ODataLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -119006,7 +128997,7 @@ type ODataResourceDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -119142,6 +129133,11 @@ func (odrd ODataResourceDataset) AsMarketoObjectDataset() (*MarketoObjectDataset
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for ODataResourceDataset.
+func (odrd ODataResourceDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for ODataResourceDataset.
func (odrd ODataResourceDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -119267,6 +129263,16 @@ func (odrd ODataResourceDataset) AsSapCloudForCustomerResourceDataset() (*SapClo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for ODataResourceDataset.
+func (odrd ODataResourceDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for ODataResourceDataset.
+func (odrd ODataResourceDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for ODataResourceDataset.
func (odrd ODataResourceDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -119307,6 +129313,16 @@ func (odrd ODataResourceDataset) AsRelationalTableDataset() (*RelationalTableDat
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for ODataResourceDataset.
+func (odrd ODataResourceDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for ODataResourceDataset.
+func (odrd ODataResourceDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for ODataResourceDataset.
func (odrd ODataResourceDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -119422,6 +129438,11 @@ func (odrd ODataResourceDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for ODataResourceDataset.
+func (odrd ODataResourceDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for ODataResourceDataset.
func (odrd ODataResourceDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -119432,6 +129453,11 @@ func (odrd ODataResourceDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for ODataResourceDataset.
+func (odrd ODataResourceDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for ODataResourceDataset.
func (odrd ODataResourceDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -119573,7 +129599,7 @@ type ODataSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
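
The enlarged `TypeBasicCopySource` list above ('TypeSapBwSource', 'TypeJSONSource', 'TypeAvroSource', ...) is what the generated unmarshal helper for `BasicCopySource` switches on when it sees the `"type"` field in a payload. A minimal standalone sketch of that discriminator pattern — deliberately simplified, not the SDK's own types — looks like this:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// copySource stands in for the SDK's BasicCopySource union.
type copySource interface{ isCopySource() }

type avroSource struct {
	Type string `json:"type"`
}

func (avroSource) isCopySource() {}

type delimitedTextSource struct {
	Type string `json:"type"`
}

func (delimitedTextSource) isCopySource() {}

// unmarshalCopySource peeks at the "type" discriminator first, then decodes
// the full body into the matching concrete struct.
func unmarshalCopySource(body []byte) (copySource, error) {
	var probe struct {
		Type string `json:"type"`
	}
	if err := json.Unmarshal(body, &probe); err != nil {
		return nil, err
	}
	switch probe.Type {
	case "AvroSource":
		var s avroSource
		err := json.Unmarshal(body, &s)
		return s, err
	case "DelimitedTextSource":
		var s delimitedTextSource
		err := json.Unmarshal(body, &s)
		return s, err
	default:
		return nil, fmt.Errorf("unknown copy source type %q", probe.Type)
	}
}

func main() {
	src, err := unmarshalCopySource([]byte(`{"type":"AvroSource"}`))
	fmt.Println(src, err)
}
```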
@@ -119697,6 +129723,11 @@ func (ods ODataSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for ODataSource.
+func (ods ODataSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for ODataSource.
func (ods ODataSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -119917,6 +129948,11 @@ func (ods ODataSource) AsODataSource() (*ODataSource, bool) {
return &ods, true
}
+// AsSapBwSource is the BasicCopySource implementation for ODataSource.
+func (ods ODataSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for ODataSource.
func (ods ODataSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -119992,6 +130028,11 @@ func (ods ODataSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for ODataSource.
+func (ods ODataSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for ODataSource.
func (ods ODataSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -120002,6 +130043,11 @@ func (ods ODataSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for ODataSource.
+func (ods ODataSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for ODataSource.
func (ods ODataSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -120098,7 +130144,7 @@ type OdbcLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -120250,6 +130296,11 @@ func (ols OdbcLinkedService) AsMarketoLinkedService() (*MarketoLinkedService, bo
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for OdbcLinkedService.
+func (ols OdbcLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for OdbcLinkedService.
func (ols OdbcLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -120770,7 +130821,7 @@ type OdbcSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -120925,6 +130976,11 @@ func (osVar OdbcSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for OdbcSink.
+func (osVar OdbcSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for OdbcSink.
func (osVar OdbcSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -120940,11 +130996,21 @@ func (osVar OdbcSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, boo
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for OdbcSink.
+func (osVar OdbcSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for OdbcSink.
func (osVar OdbcSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for OdbcSink.
+func (osVar OdbcSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for OdbcSink.
func (osVar OdbcSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -121062,7 +131128,7 @@ type OdbcSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -121186,6 +131252,11 @@ func (osVar OdbcSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for OdbcSource.
+func (osVar OdbcSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for OdbcSource.
func (osVar OdbcSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -121406,6 +131477,11 @@ func (osVar OdbcSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for OdbcSource.
+func (osVar OdbcSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for OdbcSource.
func (osVar OdbcSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -121481,6 +131557,11 @@ func (osVar OdbcSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for OdbcSource.
+func (osVar OdbcSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for OdbcSource.
func (osVar OdbcSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -121491,6 +131572,11 @@ func (osVar OdbcSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for OdbcSource.
+func (osVar OdbcSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for OdbcSource.
func (osVar OdbcSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -121593,7 +131679,7 @@ type OdbcTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -121729,6 +131815,11 @@ func (otd OdbcTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, boo
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for OdbcTableDataset.
+func (otd OdbcTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for OdbcTableDataset.
func (otd OdbcTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -121854,6 +131945,16 @@ func (otd OdbcTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudFor
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for OdbcTableDataset.
+func (otd OdbcTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for OdbcTableDataset.
+func (otd OdbcTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for OdbcTableDataset.
func (otd OdbcTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -121894,6 +131995,16 @@ func (otd OdbcTableDataset) AsRelationalTableDataset() (*RelationalTableDataset,
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for OdbcTableDataset.
+func (otd OdbcTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for OdbcTableDataset.
+func (otd OdbcTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for OdbcTableDataset.
func (otd OdbcTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -122009,6 +132120,11 @@ func (otd OdbcTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for OdbcTableDataset.
+func (otd OdbcTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for OdbcTableDataset.
func (otd OdbcTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -122019,6 +132135,11 @@ func (otd OdbcTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for OdbcTableDataset.
+func (otd OdbcTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for OdbcTableDataset.
func (otd OdbcTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -122168,7 +132289,7 @@ type Office365Dataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -122304,6 +132425,11 @@ func (o3d Office365Dataset) AsMarketoObjectDataset() (*MarketoObjectDataset, boo
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for Office365Dataset.
+func (o3d Office365Dataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for Office365Dataset.
func (o3d Office365Dataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -122429,6 +132555,16 @@ func (o3d Office365Dataset) AsSapCloudForCustomerResourceDataset() (*SapCloudFor
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for Office365Dataset.
+func (o3d Office365Dataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for Office365Dataset.
+func (o3d Office365Dataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for Office365Dataset.
func (o3d Office365Dataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -122469,6 +132605,16 @@ func (o3d Office365Dataset) AsRelationalTableDataset() (*RelationalTableDataset,
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for Office365Dataset.
+func (o3d Office365Dataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for Office365Dataset.
+func (o3d Office365Dataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for Office365Dataset.
func (o3d Office365Dataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -122584,6 +132730,11 @@ func (o3d Office365Dataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for Office365Dataset.
+func (o3d Office365Dataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for Office365Dataset.
func (o3d Office365Dataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -122594,6 +132745,11 @@ func (o3d Office365Dataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for Office365Dataset.
+func (o3d Office365Dataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for Office365Dataset.
func (o3d Office365Dataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -122739,7 +132895,7 @@ type Office365LinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -122891,6 +133047,11 @@ func (o3ls Office365LinkedService) AsMarketoLinkedService() (*MarketoLinkedServi
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for Office365LinkedService.
+func (o3ls Office365LinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for Office365LinkedService.
func (o3ls Office365LinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -123387,6 +133548,16 @@ func (o3lstp *Office365LinkedServiceTypeProperties) UnmarshalJSON(body []byte) e
// Office365Source a copy activity source for an Office365 service.
type Office365Source struct {
+ // AllowedGroups - The groups containing all the users. Type: array of strings (or Expression with resultType array of strings).
+ AllowedGroups interface{} `json:"allowedGroups,omitempty"`
+ // UserScopeFilterURI - The user scope uri. Type: string (or Expression with resultType string).
+ UserScopeFilterURI interface{} `json:"userScopeFilterUri,omitempty"`
+ // DateFilterColumn - The Column to apply the start time and end time filters to. Type: string (or Expression with resultType string).
+ DateFilterColumn interface{} `json:"dateFilterColumn,omitempty"`
+ // StartTime - Start time of the requested range for this dataset. Type: string (or Expression with resultType string).
+ StartTime interface{} `json:"startTime,omitempty"`
+ // EndTime - End time of the requested range for this dataset. Type: string (or Expression with resultType string).
+ EndTime interface{} `json:"endTime,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// SourceRetryCount - Source retry count. Type: integer (or Expression with resultType integer).
@@ -123395,7 +133566,7 @@ type Office365Source struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -123403,6 +133574,21 @@ type Office365Source struct {
func (o3s Office365Source) MarshalJSON() ([]byte, error) {
o3s.Type = TypeOffice365Source
objectMap := make(map[string]interface{})
+ if o3s.AllowedGroups != nil {
+ objectMap["allowedGroups"] = o3s.AllowedGroups
+ }
+ if o3s.UserScopeFilterURI != nil {
+ objectMap["userScopeFilterUri"] = o3s.UserScopeFilterURI
+ }
+ if o3s.DateFilterColumn != nil {
+ objectMap["dateFilterColumn"] = o3s.DateFilterColumn
+ }
+ if o3s.StartTime != nil {
+ objectMap["startTime"] = o3s.StartTime
+ }
+ if o3s.EndTime != nil {
+ objectMap["endTime"] = o3s.EndTime
+ }
if o3s.SourceRetryCount != nil {
objectMap["sourceRetryCount"] = o3s.SourceRetryCount
}
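
The new `Office365Source` filter properties above are only written when non-nil, and the generated `MarshalJSON` stamps the `"type"` discriminator for the caller. A small usage sketch, assuming the standard `2018-06-01` import path and purely hypothetical filter values:

```go
package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path for this package version.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// Hypothetical values: nil fields (e.g. AllowedGroups here) are omitted
	// from the payload, and the output should carry "type": "Office365Source".
	src := datafactory.Office365Source{
		UserScopeFilterURI: "https://contoso.example/user-scope",
		StartTime:          "2019-01-01T00:00:00Z",
		EndTime:            "2019-02-01T00:00:00Z",
	}
	body, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```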
@@ -123516,6 +133702,11 @@ func (o3s Office365Source) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for Office365Source.
+func (o3s Office365Source) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for Office365Source.
func (o3s Office365Source) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -123736,6 +133927,11 @@ func (o3s Office365Source) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for Office365Source.
+func (o3s Office365Source) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for Office365Source.
func (o3s Office365Source) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -123811,6 +134007,11 @@ func (o3s Office365Source) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for Office365Source.
+func (o3s Office365Source) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for Office365Source.
func (o3s Office365Source) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -123821,6 +134022,11 @@ func (o3s Office365Source) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for Office365Source.
+func (o3s Office365Source) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for Office365Source.
func (o3s Office365Source) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -123840,6 +134046,51 @@ func (o3s *Office365Source) UnmarshalJSON(body []byte) error {
}
for k, v := range m {
switch k {
+ case "allowedGroups":
+ if v != nil {
+ var allowedGroups interface{}
+ err = json.Unmarshal(*v, &allowedGroups)
+ if err != nil {
+ return err
+ }
+ o3s.AllowedGroups = allowedGroups
+ }
+ case "userScopeFilterUri":
+ if v != nil {
+ var userScopeFilterURI interface{}
+ err = json.Unmarshal(*v, &userScopeFilterURI)
+ if err != nil {
+ return err
+ }
+ o3s.UserScopeFilterURI = userScopeFilterURI
+ }
+ case "dateFilterColumn":
+ if v != nil {
+ var dateFilterColumn interface{}
+ err = json.Unmarshal(*v, &dateFilterColumn)
+ if err != nil {
+ return err
+ }
+ o3s.DateFilterColumn = dateFilterColumn
+ }
+ case "startTime":
+ if v != nil {
+ var startTime interface{}
+ err = json.Unmarshal(*v, &startTime)
+ if err != nil {
+ return err
+ }
+ o3s.StartTime = startTime
+ }
+ case "endTime":
+ if v != nil {
+ var endTime interface{}
+ err = json.Unmarshal(*v, &endTime)
+ if err != nil {
+ return err
+ }
+ o3s.EndTime = endTime
+ }
default:
if v != nil {
var additionalProperties interface{}
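
The `UnmarshalJSON` hunk above routes known keys such as `startTime` into typed fields and sends everything else to `AdditionalProperties`. A self-contained sketch of that pattern (simplified, not the generated code itself):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// source is a stand-in for a generated model with one known property plus
// a catch-all bag for unmatched keys.
type source struct {
	StartTime            interface{}
	AdditionalProperties map[string]interface{}
}

func (s *source) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	if err := json.Unmarshal(body, &m); err != nil {
		return err
	}
	for k, v := range m {
		if v == nil {
			continue
		}
		switch k {
		case "startTime":
			// Known key: decode into the typed field.
			var startTime interface{}
			if err := json.Unmarshal(*v, &startTime); err != nil {
				return err
			}
			s.StartTime = startTime
		default:
			// Unmatched key: collect it into AdditionalProperties.
			var extra interface{}
			if err := json.Unmarshal(*v, &extra); err != nil {
				return err
			}
			if s.AdditionalProperties == nil {
				s.AdditionalProperties = make(map[string]interface{})
			}
			s.AdditionalProperties[k] = extra
		}
	}
	return nil
}

func main() {
	var s source
	_ = json.Unmarshal([]byte(`{"startTime":"2019-01-01","custom":"x"}`), &s)
	fmt.Println(s.StartTime, s.AdditionalProperties)
}
```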
@@ -124213,7 +134464,7 @@ type OracleLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -124365,6 +134616,11 @@ func (ols OracleLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for OracleLinkedService.
+func (ols OracleLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for OracleLinkedService.
func (ols OracleLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -124822,7 +135078,7 @@ type OracleServiceCloudLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -124974,6 +135230,11 @@ func (oscls OracleServiceCloudLinkedService) AsMarketoLinkedService() (*MarketoL
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for OracleServiceCloudLinkedService.
+func (oscls OracleServiceCloudLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for OracleServiceCloudLinkedService.
func (oscls OracleServiceCloudLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -125510,7 +135771,7 @@ type OracleServiceCloudObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -125646,6 +135907,11 @@ func (oscod OracleServiceCloudObjectDataset) AsMarketoObjectDataset() (*MarketoO
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
+func (oscod OracleServiceCloudObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
func (oscod OracleServiceCloudObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -125771,6 +136037,16 @@ func (oscod OracleServiceCloudObjectDataset) AsSapCloudForCustomerResourceDatase
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
+func (oscod OracleServiceCloudObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
+func (oscod OracleServiceCloudObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
func (oscod OracleServiceCloudObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -125811,6 +136087,16 @@ func (oscod OracleServiceCloudObjectDataset) AsRelationalTableDataset() (*Relati
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
+func (oscod OracleServiceCloudObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
+func (oscod OracleServiceCloudObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
func (oscod OracleServiceCloudObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -125926,6 +136212,11 @@ func (oscod OracleServiceCloudObjectDataset) AsBinaryDataset() (*BinaryDataset,
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
+func (oscod OracleServiceCloudObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
func (oscod OracleServiceCloudObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -125936,6 +136227,11 @@ func (oscod OracleServiceCloudObjectDataset) AsParquetDataset() (*ParquetDataset
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
+func (oscod OracleServiceCloudObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for OracleServiceCloudObjectDataset.
func (oscod OracleServiceCloudObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -126071,7 +136367,7 @@ type OracleServiceCloudSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -126195,6 +136491,11 @@ func (oscs OracleServiceCloudSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for OracleServiceCloudSource.
+func (oscs OracleServiceCloudSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for OracleServiceCloudSource.
func (oscs OracleServiceCloudSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -126415,6 +136716,11 @@ func (oscs OracleServiceCloudSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for OracleServiceCloudSource.
+func (oscs OracleServiceCloudSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for OracleServiceCloudSource.
func (oscs OracleServiceCloudSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -126490,6 +136796,11 @@ func (oscs OracleServiceCloudSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for OracleServiceCloudSource.
+func (oscs OracleServiceCloudSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for OracleServiceCloudSource.
func (oscs OracleServiceCloudSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -126500,6 +136811,11 @@ func (oscs OracleServiceCloudSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for OracleServiceCloudSource.
+func (oscs OracleServiceCloudSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for OracleServiceCloudSource.
func (oscs OracleServiceCloudSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -126598,7 +136914,7 @@ type OracleSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -126753,6 +137069,11 @@ func (osVar OracleSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for OracleSink.
+func (osVar OracleSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for OracleSink.
func (osVar OracleSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -126768,11 +137089,21 @@ func (osVar OracleSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, b
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for OracleSink.
+func (osVar OracleSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for OracleSink.
func (osVar OracleSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for OracleSink.
+func (osVar OracleSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for OracleSink.
func (osVar OracleSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -126896,7 +137227,7 @@ type OracleSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -127029,6 +137360,11 @@ func (osVar OracleSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for OracleSource.
+func (osVar OracleSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for OracleSource.
func (osVar OracleSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -127249,6 +137585,11 @@ func (osVar OracleSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for OracleSource.
+func (osVar OracleSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for OracleSource.
func (osVar OracleSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -127324,6 +137665,11 @@ func (osVar OracleSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for OracleSource.
+func (osVar OracleSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for OracleSource.
func (osVar OracleSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -127334,6 +137680,11 @@ func (osVar OracleSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for OracleSource.
+func (osVar OracleSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for OracleSource.
func (osVar OracleSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -127463,7 +137814,7 @@ type OracleTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -127599,6 +137950,11 @@ func (otd OracleTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, b
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for OracleTableDataset.
+func (otd OracleTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for OracleTableDataset.
func (otd OracleTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -127724,6 +138080,16 @@ func (otd OracleTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudF
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for OracleTableDataset.
+func (otd OracleTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for OracleTableDataset.
+func (otd OracleTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for OracleTableDataset.
func (otd OracleTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -127764,6 +138130,16 @@ func (otd OracleTableDataset) AsRelationalTableDataset() (*RelationalTableDatase
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for OracleTableDataset.
+func (otd OracleTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for OracleTableDataset.
+func (otd OracleTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for OracleTableDataset.
func (otd OracleTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -127879,6 +138255,11 @@ func (otd OracleTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for OracleTableDataset.
+func (otd OracleTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for OracleTableDataset.
func (otd OracleTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -127889,6 +138270,11 @@ func (otd OracleTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for OracleTableDataset.
+func (otd OracleTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for OracleTableDataset.
func (otd OracleTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -128014,8 +138400,12 @@ func (otd *OracleTableDataset) UnmarshalJSON(body []byte) error {
// OracleTableDatasetTypeProperties on-premises Oracle dataset properties.
type OracleTableDatasetTypeProperties struct {
- // TableName - The table name of the on-premises Oracle database. Type: string (or Expression with resultType string).
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
TableName interface{} `json:"tableName,omitempty"`
+ // Schema - The schema name of the on-premises Oracle database. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+ // Table - The table name of the on-premises Oracle database. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
}
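
Because `tableName` is being retired in favor of the split `schema`/`table` properties, new code should populate the two new fields. A minimal sketch under the same import-path assumption as above; plain strings are used here, although per the doc comments each field may also carry an Expression object:

```go
package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path for these generated models; adjust to your vendored version.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	props := datafactory.OracleTableDatasetTypeProperties{
		// Schema and Table supersede the retired TableName property.
		Schema: "HR",
		Table:  "EMPLOYEES",
	}
	b, err := json.Marshal(props)
	if err != nil {
		panic(err)
	}
	// TableName is nil and omitted, so only the new properties are emitted:
	// {"schema":"HR","table":"EMPLOYEES"}
	fmt.Println(string(b))
}
```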
// OrcFormat the data stored in Optimized Row Columnar (ORC) format.
@@ -128166,7 +138556,7 @@ type ParquetDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -128302,6 +138692,11 @@ func (pd ParquetDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool)
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for ParquetDataset.
+func (pd ParquetDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for ParquetDataset.
func (pd ParquetDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -128427,6 +138822,16 @@ func (pd ParquetDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCus
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for ParquetDataset.
+func (pd ParquetDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for ParquetDataset.
+func (pd ParquetDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for ParquetDataset.
func (pd ParquetDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -128467,6 +138872,16 @@ func (pd ParquetDataset) AsRelationalTableDataset() (*RelationalTableDataset, bo
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for ParquetDataset.
+func (pd ParquetDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for ParquetDataset.
+func (pd ParquetDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for ParquetDataset.
func (pd ParquetDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -128582,6 +138997,11 @@ func (pd ParquetDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for ParquetDataset.
+func (pd ParquetDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for ParquetDataset.
func (pd ParquetDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -128592,6 +139012,11 @@ func (pd ParquetDataset) AsParquetDataset() (*ParquetDataset, bool) {
return &pd, true
}
+// AsAvroDataset is the BasicDataset implementation for ParquetDataset.
+func (pd ParquetDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for ParquetDataset.
func (pd ParquetDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -128845,7 +139270,7 @@ func (pf *ParquetFormat) UnmarshalJSON(body []byte) error {
// ParquetSink a copy activity Parquet sink.
type ParquetSink struct {
// StoreSettings - Parquet store settings.
- StoreSettings *StoreWriteSettings `json:"storeSettings,omitempty"`
+ StoreSettings BasicStoreWriteSettings `json:"storeSettings,omitempty"`
	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// WriteBatchSize - Write batch size. Type: integer (or Expression with resultType integer), minimum: 0.
@@ -128858,7 +139283,7 @@ type ParquetSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -128866,9 +139291,7 @@ type ParquetSink struct {
func (ps ParquetSink) MarshalJSON() ([]byte, error) {
ps.Type = TypeParquetSink
objectMap := make(map[string]interface{})
- if ps.StoreSettings != nil {
- objectMap["storeSettings"] = ps.StoreSettings
- }
+ objectMap["storeSettings"] = ps.StoreSettings
if ps.WriteBatchSize != nil {
objectMap["writeBatchSize"] = ps.WriteBatchSize
}
@@ -129013,6 +139436,11 @@ func (ps ParquetSink) AsParquetSink() (*ParquetSink, bool) {
return &ps, true
}
+// AsAvroSink is the BasicCopySink implementation for ParquetSink.
+func (ps ParquetSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for ParquetSink.
func (ps ParquetSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -129028,11 +139456,21 @@ func (ps ParquetSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, boo
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for ParquetSink.
+func (ps ParquetSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for ParquetSink.
func (ps ParquetSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for ParquetSink.
+func (ps ParquetSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for ParquetSink.
func (ps ParquetSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -129059,12 +139497,11 @@ func (ps *ParquetSink) UnmarshalJSON(body []byte) error {
switch k {
case "storeSettings":
if v != nil {
- var storeSettings StoreWriteSettings
- err = json.Unmarshal(*v, &storeSettings)
+ storeSettings, err := unmarshalBasicStoreWriteSettings(*v)
if err != nil {
return err
}
- ps.StoreSettings = &storeSettings
+ ps.StoreSettings = storeSettings
}
default:
if v != nil {
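
With `StoreSettings` changed from `*StoreWriteSettings` to the polymorphic `BasicStoreWriteSettings` interface, `UnmarshalJSON` now routes through `unmarshalBasicStoreWriteSettings` and hands callers a concrete settings value rather than the base struct. A minimal sketch of consuming the decoded field with a plain type switch; `AzureBlobStorageWriteSettings` is assumed to be one of the concrete write-settings types generated for this API version, so check the generated models for the exact names:

```go
package main

import (
	"fmt"

	// Assumed import path for these generated models; adjust to your vendored version.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// inspectParquetSinkSettings shows one way to consume the now-polymorphic
// StoreSettings field after a ParquetSink has been unmarshalled.
func inspectParquetSinkSettings(ps datafactory.ParquetSink) {
	switch ps.StoreSettings.(type) {
	case datafactory.AzureBlobStorageWriteSettings: // assumed concrete type name
		fmt.Println("writing Parquet to Azure Blob Storage")
	case nil:
		fmt.Println("no store settings set")
	default:
		fmt.Printf("other store write settings: %T\n", ps.StoreSettings)
	}
}

func main() {
	inspectParquetSinkSettings(datafactory.ParquetSink{})
}
```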
@@ -129150,7 +139587,7 @@ type ParquetSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -129274,6 +139711,11 @@ func (ps ParquetSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for ParquetSource.
+func (ps ParquetSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for ParquetSource.
func (ps ParquetSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -129494,6 +139936,11 @@ func (ps ParquetSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for ParquetSource.
+func (ps ParquetSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for ParquetSource.
func (ps ParquetSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -129569,6 +140016,11 @@ func (ps ParquetSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for ParquetSource.
+func (ps ParquetSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for ParquetSource.
func (ps ParquetSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -129579,6 +140031,11 @@ func (ps ParquetSource) AsParquetSource() (*ParquetSource, bool) {
return &ps, true
}
+// AsAvroSource is the BasicCopySource implementation for ParquetSource.
+func (ps ParquetSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for ParquetSource.
func (ps ParquetSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -129675,7 +140132,7 @@ type PaypalLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -129827,6 +140284,11 @@ func (pls PaypalLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for PaypalLinkedService.
+func (pls PaypalLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for PaypalLinkedService.
func (pls PaypalLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -130363,7 +140825,7 @@ type PaypalObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
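// Illustrative sketch, not part of the generated change: the discriminator list
// above gains values such as TypeAzureMariaDBTable, TypeJSON and TypeAvro, so any
// caller that branches on a dataset's Type field may need new cases. This helper
// is hypothetical and assumes the constants are exported exactly as named above.
func describeDatasetType(t TypeBasicDataset) string {
	switch t {
	case TypeAzureMariaDBTable:
		return "Azure Database for MariaDB table dataset"
	case TypeJSON:
		return "JSON format dataset"
	case TypeAvro:
		return "Avro format dataset"
	default:
		return "other dataset type"
	}
}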
@@ -130499,6 +140961,11 @@ func (pod PaypalObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for PaypalObjectDataset.
+func (pod PaypalObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for PaypalObjectDataset.
func (pod PaypalObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -130624,6 +141091,16 @@ func (pod PaypalObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloud
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for PaypalObjectDataset.
+func (pod PaypalObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for PaypalObjectDataset.
+func (pod PaypalObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for PaypalObjectDataset.
func (pod PaypalObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -130664,6 +141141,16 @@ func (pod PaypalObjectDataset) AsRelationalTableDataset() (*RelationalTableDatas
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for PaypalObjectDataset.
+func (pod PaypalObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for PaypalObjectDataset.
+func (pod PaypalObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for PaypalObjectDataset.
func (pod PaypalObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -130779,6 +141266,11 @@ func (pod PaypalObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for PaypalObjectDataset.
+func (pod PaypalObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
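// Illustrative sketch, not part of the generated change: each concrete dataset
// answers the new accessors (AsJSONDataset, AsAvroDataset, AsAzureMariaDBTableDataset,
// and so on) with (nil, false) unless it is that type, so callers can probe a
// polymorphic BasicDataset value without reflection. Hypothetical helper; it assumes
// the BasicDataset interface declares AsJSONDataset after this change.
func asJSONDatasetOrNil(d BasicDataset) *JSONDataset {
	if jd, ok := d.AsJSONDataset(); ok {
		return jd
	}
	return nil
}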
// AsDelimitedTextDataset is the BasicDataset implementation for PaypalObjectDataset.
func (pod PaypalObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -130789,6 +141281,11 @@ func (pod PaypalObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for PaypalObjectDataset.
+func (pod PaypalObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for PaypalObjectDataset.
func (pod PaypalObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -130924,7 +141421,7 @@ type PaypalSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -131048,6 +141545,11 @@ func (ps PaypalSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for PaypalSource.
+func (ps PaypalSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for PaypalSource.
func (ps PaypalSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -131268,6 +141770,11 @@ func (ps PaypalSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for PaypalSource.
+func (ps PaypalSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for PaypalSource.
func (ps PaypalSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -131343,6 +141850,11 @@ func (ps PaypalSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for PaypalSource.
+func (ps PaypalSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for PaypalSource.
func (ps PaypalSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -131353,6 +141865,11 @@ func (ps PaypalSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for PaypalSource.
+func (ps PaypalSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for PaypalSource.
func (ps PaypalSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -131435,6 +141952,16 @@ func (ps *PaypalSource) UnmarshalJSON(body []byte) error {
return nil
}
+// PhoenixDatasetTypeProperties phoenix Dataset Properties
+type PhoenixDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Table - The Phoenix table name. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Schema - The Phoenix schema name. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+}
+
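// Illustrative sketch, not part of the generated change: PhoenixObjectDataset now
// embeds PhoenixDatasetTypeProperties (see the struct change further down) instead of
// GenericDatasetTypeProperties, so a dataset can be built with the new Schema and
// Table fields rather than the retiring TableName. Field values are hypothetical.
func examplePhoenixObjectDataset() PhoenixObjectDataset {
	return PhoenixObjectDataset{
		PhoenixDatasetTypeProperties: &PhoenixDatasetTypeProperties{
			Schema: "analytics", // schema name; string or Expression
			Table:  "events",    // table name; string or Expression
		},
	}
}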
// PhoenixLinkedService phoenix server linked service.
type PhoenixLinkedService struct {
// PhoenixLinkedServiceTypeProperties - Phoenix server linked service properties.
@@ -131449,7 +141976,7 @@ type PhoenixLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -131601,6 +142128,11 @@ func (pls PhoenixLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for PhoenixLinkedService.
+func (pls PhoenixLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for PhoenixLinkedService.
func (pls PhoenixLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -132174,8 +142706,8 @@ func (plstp *PhoenixLinkedServiceTypeProperties) UnmarshalJSON(body []byte) erro
// PhoenixObjectDataset phoenix server dataset.
type PhoenixObjectDataset struct {
- // GenericDatasetTypeProperties - Properties specific to this dataset type.
- *GenericDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // PhoenixDatasetTypeProperties - Properties specific to this dataset type.
+ *PhoenixDatasetTypeProperties `json:"typeProperties,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// Description - Dataset description.
@@ -132192,7 +142724,7 @@ type PhoenixObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -132200,8 +142732,8 @@ type PhoenixObjectDataset struct {
func (pod PhoenixObjectDataset) MarshalJSON() ([]byte, error) {
pod.Type = TypePhoenixObject
objectMap := make(map[string]interface{})
- if pod.GenericDatasetTypeProperties != nil {
- objectMap["typeProperties"] = pod.GenericDatasetTypeProperties
+ if pod.PhoenixDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = pod.PhoenixDatasetTypeProperties
}
if pod.Description != nil {
objectMap["description"] = pod.Description
@@ -132328,6 +142860,11 @@ func (pod PhoenixObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for PhoenixObjectDataset.
+func (pod PhoenixObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for PhoenixObjectDataset.
func (pod PhoenixObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -132453,6 +142990,16 @@ func (pod PhoenixObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapClou
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for PhoenixObjectDataset.
+func (pod PhoenixObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for PhoenixObjectDataset.
+func (pod PhoenixObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for PhoenixObjectDataset.
func (pod PhoenixObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -132493,6 +143040,16 @@ func (pod PhoenixObjectDataset) AsRelationalTableDataset() (*RelationalTableData
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for PhoenixObjectDataset.
+func (pod PhoenixObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for PhoenixObjectDataset.
+func (pod PhoenixObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for PhoenixObjectDataset.
func (pod PhoenixObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -132608,6 +143165,11 @@ func (pod PhoenixObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for PhoenixObjectDataset.
+func (pod PhoenixObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for PhoenixObjectDataset.
func (pod PhoenixObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -132618,6 +143180,11 @@ func (pod PhoenixObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for PhoenixObjectDataset.
+func (pod PhoenixObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for PhoenixObjectDataset.
func (pod PhoenixObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -132644,12 +143211,12 @@ func (pod *PhoenixObjectDataset) UnmarshalJSON(body []byte) error {
switch k {
case "typeProperties":
if v != nil {
- var genericDatasetTypeProperties GenericDatasetTypeProperties
- err = json.Unmarshal(*v, &genericDatasetTypeProperties)
+ var phoenixDatasetTypeProperties PhoenixDatasetTypeProperties
+ err = json.Unmarshal(*v, &phoenixDatasetTypeProperties)
if err != nil {
return err
}
- pod.GenericDatasetTypeProperties = &genericDatasetTypeProperties
+ pod.PhoenixDatasetTypeProperties = &phoenixDatasetTypeProperties
}
default:
if v != nil {
@@ -132753,7 +143320,7 @@ type PhoenixSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -132877,6 +143444,11 @@ func (ps PhoenixSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for PhoenixSource.
+func (ps PhoenixSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for PhoenixSource.
func (ps PhoenixSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -133097,6 +143669,11 @@ func (ps PhoenixSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for PhoenixSource.
+func (ps PhoenixSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for PhoenixSource.
func (ps PhoenixSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -133172,6 +143749,11 @@ func (ps PhoenixSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for PhoenixSource.
+func (ps PhoenixSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for PhoenixSource.
func (ps PhoenixSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -133182,6 +143764,11 @@ func (ps PhoenixSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for PhoenixSource.
+func (ps PhoenixSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for PhoenixSource.
func (ps PhoenixSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -133956,7 +144543,7 @@ type PostgreSQLLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -134108,6 +144695,11 @@ func (psls PostgreSQLLinkedService) AsMarketoLinkedService() (*MarketoLinkedServ
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for PostgreSQLLinkedService.
+func (psls PostgreSQLLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for PostgreSQLLinkedService.
func (psls PostgreSQLLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -134592,7 +145184,7 @@ type PostgreSQLSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -134716,6 +145308,11 @@ func (pss PostgreSQLSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for PostgreSQLSource.
+func (pss PostgreSQLSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for PostgreSQLSource.
func (pss PostgreSQLSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -134936,6 +145533,11 @@ func (pss PostgreSQLSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for PostgreSQLSource.
+func (pss PostgreSQLSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for PostgreSQLSource.
func (pss PostgreSQLSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -135011,6 +145613,11 @@ func (pss PostgreSQLSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for PostgreSQLSource.
+func (pss PostgreSQLSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for PostgreSQLSource.
func (pss PostgreSQLSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -135021,6 +145628,11 @@ func (pss PostgreSQLSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for PostgreSQLSource.
+func (pss PostgreSQLSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for PostgreSQLSource.
func (pss PostgreSQLSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -135123,7 +145735,7 @@ type PostgreSQLTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -135259,6 +145871,11 @@ func (pstd PostgreSQLTableDataset) AsMarketoObjectDataset() (*MarketoObjectDatas
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for PostgreSQLTableDataset.
+func (pstd PostgreSQLTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for PostgreSQLTableDataset.
func (pstd PostgreSQLTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -135384,6 +146001,16 @@ func (pstd PostgreSQLTableDataset) AsSapCloudForCustomerResourceDataset() (*SapC
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for PostgreSQLTableDataset.
+func (pstd PostgreSQLTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for PostgreSQLTableDataset.
+func (pstd PostgreSQLTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for PostgreSQLTableDataset.
func (pstd PostgreSQLTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -135424,6 +146051,16 @@ func (pstd PostgreSQLTableDataset) AsRelationalTableDataset() (*RelationalTableD
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for PostgreSQLTableDataset.
+func (pstd PostgreSQLTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for PostgreSQLTableDataset.
+func (pstd PostgreSQLTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for PostgreSQLTableDataset.
func (pstd PostgreSQLTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -135539,6 +146176,11 @@ func (pstd PostgreSQLTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for PostgreSQLTableDataset.
+func (pstd PostgreSQLTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for PostgreSQLTableDataset.
func (pstd PostgreSQLTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -135549,6 +146191,11 @@ func (pstd PostgreSQLTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for PostgreSQLTableDataset.
+func (pstd PostgreSQLTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for PostgreSQLTableDataset.
func (pstd PostgreSQLTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -135674,8 +146321,22 @@ func (pstd *PostgreSQLTableDataset) UnmarshalJSON(body []byte) error {
// PostgreSQLTableDatasetTypeProperties postgreSQL table dataset properties.
type PostgreSQLTableDatasetTypeProperties struct {
- // TableName - The PostgreSQL table name. Type: string (or Expression with resultType string).
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Table - The PostgreSQL table name. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Schema - The PostgreSQL schema name. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+}
+
+// PrestoDatasetTypeProperties presto Dataset Properties
+type PrestoDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
TableName interface{} `json:"tableName,omitempty"`
+ // Table - The Presto table name. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Schema - The Presto schema name. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
}
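// Illustrative sketch, not part of the generated change: callers that populate
// PostgreSQLTableDatasetTypeProperties via TableName can move to the new Schema and
// Table fields, since TableName is slated for retirement. Values are hypothetical.
func examplePostgreSQLTableProps() PostgreSQLTableDatasetTypeProperties {
	return PostgreSQLTableDatasetTypeProperties{
		// Previously: TableName: "public.orders",
		Schema: "public",
		Table:  "orders",
	}
}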
// PrestoLinkedService presto server linked service.
@@ -135692,7 +146353,7 @@ type PrestoLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -135844,6 +146505,11 @@ func (pls PrestoLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for PrestoLinkedService.
+func (pls PrestoLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for PrestoLinkedService.
func (pls PrestoLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -136439,8 +147105,8 @@ func (plstp *PrestoLinkedServiceTypeProperties) UnmarshalJSON(body []byte) error
// PrestoObjectDataset presto server dataset.
type PrestoObjectDataset struct {
- // GenericDatasetTypeProperties - Properties specific to this dataset type.
- *GenericDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // PrestoDatasetTypeProperties - Properties specific to this dataset type.
+ *PrestoDatasetTypeProperties `json:"typeProperties,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// Description - Dataset description.
@@ -136457,7 +147123,7 @@ type PrestoObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -136465,8 +147131,8 @@ type PrestoObjectDataset struct {
func (pod PrestoObjectDataset) MarshalJSON() ([]byte, error) {
pod.Type = TypePrestoObject
objectMap := make(map[string]interface{})
- if pod.GenericDatasetTypeProperties != nil {
- objectMap["typeProperties"] = pod.GenericDatasetTypeProperties
+ if pod.PrestoDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = pod.PrestoDatasetTypeProperties
}
if pod.Description != nil {
objectMap["description"] = pod.Description
@@ -136593,6 +147259,11 @@ func (pod PrestoObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for PrestoObjectDataset.
+func (pod PrestoObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for PrestoObjectDataset.
func (pod PrestoObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -136718,6 +147389,16 @@ func (pod PrestoObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloud
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for PrestoObjectDataset.
+func (pod PrestoObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for PrestoObjectDataset.
+func (pod PrestoObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for PrestoObjectDataset.
func (pod PrestoObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -136758,6 +147439,16 @@ func (pod PrestoObjectDataset) AsRelationalTableDataset() (*RelationalTableDatas
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for PrestoObjectDataset.
+func (pod PrestoObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for PrestoObjectDataset.
+func (pod PrestoObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for PrestoObjectDataset.
func (pod PrestoObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -136873,6 +147564,11 @@ func (pod PrestoObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for PrestoObjectDataset.
+func (pod PrestoObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for PrestoObjectDataset.
func (pod PrestoObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -136883,6 +147579,11 @@ func (pod PrestoObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for PrestoObjectDataset.
+func (pod PrestoObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for PrestoObjectDataset.
func (pod PrestoObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -136909,12 +147610,12 @@ func (pod *PrestoObjectDataset) UnmarshalJSON(body []byte) error {
switch k {
case "typeProperties":
if v != nil {
- var genericDatasetTypeProperties GenericDatasetTypeProperties
- err = json.Unmarshal(*v, &genericDatasetTypeProperties)
+ var prestoDatasetTypeProperties PrestoDatasetTypeProperties
+ err = json.Unmarshal(*v, &prestoDatasetTypeProperties)
if err != nil {
return err
}
- pod.GenericDatasetTypeProperties = &genericDatasetTypeProperties
+ pod.PrestoDatasetTypeProperties = &prestoDatasetTypeProperties
}
default:
if v != nil {
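
The two Presto hunks just above carry one of this release's breaking changes: `PrestoObjectDataset` now marshals and unmarshals its `typeProperties` through a Presto-specific `PrestoDatasetTypeProperties` struct instead of the shared `GenericDatasetTypeProperties`. A hedged migration sketch follows; the `TableName` field and the import path are assumptions for illustration, not taken from this diff:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	pod := datafactory.PrestoObjectDataset{
		// Before v33.0.0 the shared properties field was populated:
		//   GenericDatasetTypeProperties: &datafactory.GenericDatasetTypeProperties{TableName: "events"},
		// From v33.0.0 the Presto-specific properties struct is used instead.
		PrestoDatasetTypeProperties: &datafactory.PrestoDatasetTypeProperties{
			TableName: "events", // assumed field name; check the generated struct for its full field set
		},
	}

	// MarshalJSON (shown in the hunk above) stamps the PrestoObject type
	// discriminator and writes the new properties under "typeProperties".
	b, err := json.Marshal(pod)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```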
@@ -137018,7 +147719,7 @@ type PrestoSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -137142,6 +147843,11 @@ func (ps PrestoSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for PrestoSource.
+func (ps PrestoSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for PrestoSource.
func (ps PrestoSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -137362,6 +148068,11 @@ func (ps PrestoSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for PrestoSource.
+func (ps PrestoSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for PrestoSource.
func (ps PrestoSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -137437,6 +148148,11 @@ func (ps PrestoSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for PrestoSource.
+func (ps PrestoSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for PrestoSource.
func (ps PrestoSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -137447,6 +148163,11 @@ func (ps PrestoSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for PrestoSource.
+func (ps PrestoSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for PrestoSource.
func (ps PrestoSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -137543,7 +148264,7 @@ type QuickBooksLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -137695,6 +148416,11 @@ func (qbls QuickBooksLinkedService) AsMarketoLinkedService() (*MarketoLinkedServ
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for QuickBooksLinkedService.
+func (qbls QuickBooksLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for QuickBooksLinkedService.
func (qbls QuickBooksLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -138240,7 +148966,7 @@ type QuickBooksObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -138376,6 +149102,11 @@ func (qbod QuickBooksObjectDataset) AsMarketoObjectDataset() (*MarketoObjectData
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for QuickBooksObjectDataset.
+func (qbod QuickBooksObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for QuickBooksObjectDataset.
func (qbod QuickBooksObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -138501,6 +149232,16 @@ func (qbod QuickBooksObjectDataset) AsSapCloudForCustomerResourceDataset() (*Sap
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for QuickBooksObjectDataset.
+func (qbod QuickBooksObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for QuickBooksObjectDataset.
+func (qbod QuickBooksObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for QuickBooksObjectDataset.
func (qbod QuickBooksObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -138541,6 +149282,16 @@ func (qbod QuickBooksObjectDataset) AsRelationalTableDataset() (*RelationalTable
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for QuickBooksObjectDataset.
+func (qbod QuickBooksObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for QuickBooksObjectDataset.
+func (qbod QuickBooksObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for QuickBooksObjectDataset.
func (qbod QuickBooksObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -138656,6 +149407,11 @@ func (qbod QuickBooksObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for QuickBooksObjectDataset.
+func (qbod QuickBooksObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for QuickBooksObjectDataset.
func (qbod QuickBooksObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -138666,6 +149422,11 @@ func (qbod QuickBooksObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for QuickBooksObjectDataset.
+func (qbod QuickBooksObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for QuickBooksObjectDataset.
func (qbod QuickBooksObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -138801,7 +149562,7 @@ type QuickBooksSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -138925,6 +149686,11 @@ func (qbs QuickBooksSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for QuickBooksSource.
+func (qbs QuickBooksSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for QuickBooksSource.
func (qbs QuickBooksSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -139145,6 +149911,11 @@ func (qbs QuickBooksSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for QuickBooksSource.
+func (qbs QuickBooksSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for QuickBooksSource.
func (qbs QuickBooksSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -139220,6 +149991,11 @@ func (qbs QuickBooksSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for QuickBooksSource.
+func (qbs QuickBooksSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for QuickBooksSource.
func (qbs QuickBooksSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -139230,6 +150006,11 @@ func (qbs QuickBooksSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for QuickBooksSource.
+func (qbs QuickBooksSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for QuickBooksSource.
func (qbs QuickBooksSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -139586,7 +150367,7 @@ type RelationalSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -139710,6 +150491,11 @@ func (rs RelationalSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for RelationalSource.
+func (rs RelationalSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for RelationalSource.
func (rs RelationalSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -139930,6 +150716,11 @@ func (rs RelationalSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for RelationalSource.
+func (rs RelationalSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for RelationalSource.
func (rs RelationalSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -140005,6 +150796,11 @@ func (rs RelationalSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for RelationalSource.
+func (rs RelationalSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for RelationalSource.
func (rs RelationalSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -140015,6 +150811,11 @@ func (rs RelationalSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for RelationalSource.
+func (rs RelationalSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for RelationalSource.
func (rs RelationalSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -140117,7 +150918,7 @@ type RelationalTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -140253,6 +151054,11 @@ func (rtd RelationalTableDataset) AsMarketoObjectDataset() (*MarketoObjectDatase
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for RelationalTableDataset.
+func (rtd RelationalTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for RelationalTableDataset.
func (rtd RelationalTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -140378,6 +151184,16 @@ func (rtd RelationalTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCl
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for RelationalTableDataset.
+func (rtd RelationalTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for RelationalTableDataset.
+func (rtd RelationalTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for RelationalTableDataset.
func (rtd RelationalTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -140418,6 +151234,16 @@ func (rtd RelationalTableDataset) AsRelationalTableDataset() (*RelationalTableDa
return &rtd, true
}
+// AsDb2TableDataset is the BasicDataset implementation for RelationalTableDataset.
+func (rtd RelationalTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for RelationalTableDataset.
+func (rtd RelationalTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for RelationalTableDataset.
func (rtd RelationalTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -140533,6 +151359,11 @@ func (rtd RelationalTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for RelationalTableDataset.
+func (rtd RelationalTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for RelationalTableDataset.
func (rtd RelationalTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -140543,6 +151374,11 @@ func (rtd RelationalTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for RelationalTableDataset.
+func (rtd RelationalTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for RelationalTableDataset.
func (rtd RelationalTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -140910,7 +151746,7 @@ type RerunTumblingWindowTrigger struct {
AdditionalProperties map[string]interface{} `json:""`
// Description - Trigger description.
Description *string `json:"description,omitempty"`
- // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'Started', 'Stopped', 'Disabled'
+ // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'TriggerRuntimeStateStarted', 'TriggerRuntimeStateStopped', 'TriggerRuntimeStateDisabled'
RuntimeState TriggerRuntimeState `json:"runtimeState,omitempty"`
// Annotations - List of tags that can be used for describing the trigger.
Annotations *[]interface{} `json:"annotations,omitempty"`
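
The `RerunTumblingWindowTrigger` hunk above reflects another breaking rename: the `TriggerRuntimeState` constants gain the type-name prefix, so the Go identifiers change while the underlying string values should be unaffected. A minimal before/after sketch, assuming the same package path as in the earlier sketches:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// reportTrigger prints whether a trigger is currently running.
func reportTrigger(state datafactory.TriggerRuntimeState) {
	// Before v33.0.0 the comparison used the unprefixed constant:
	//   if state == datafactory.Started { ... }
	// From v33.0.0 the constants carry the TriggerRuntimeState prefix.
	if state == datafactory.TriggerRuntimeStateStarted {
		fmt.Println("trigger is running")
		return
	}
	fmt.Println("trigger is stopped or disabled")
}

func main() {
	reportTrigger(datafactory.TriggerRuntimeStateStarted)
}
```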
@@ -141121,7 +151957,7 @@ type ResponsysLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -141273,6 +152109,11 @@ func (rls ResponsysLinkedService) AsMarketoLinkedService() (*MarketoLinkedServic
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for ResponsysLinkedService.
+func (rls ResponsysLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for ResponsysLinkedService.
func (rls ResponsysLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -141809,7 +152650,7 @@ type ResponsysObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -141945,6 +152786,11 @@ func (rod ResponsysObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDatase
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for ResponsysObjectDataset.
+func (rod ResponsysObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for ResponsysObjectDataset.
func (rod ResponsysObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -142070,6 +152916,16 @@ func (rod ResponsysObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCl
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for ResponsysObjectDataset.
+func (rod ResponsysObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for ResponsysObjectDataset.
+func (rod ResponsysObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for ResponsysObjectDataset.
func (rod ResponsysObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -142110,6 +152966,16 @@ func (rod ResponsysObjectDataset) AsRelationalTableDataset() (*RelationalTableDa
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for ResponsysObjectDataset.
+func (rod ResponsysObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for ResponsysObjectDataset.
+func (rod ResponsysObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for ResponsysObjectDataset.
func (rod ResponsysObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -142225,6 +153091,11 @@ func (rod ResponsysObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for ResponsysObjectDataset.
+func (rod ResponsysObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for ResponsysObjectDataset.
func (rod ResponsysObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -142235,6 +153106,11 @@ func (rod ResponsysObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for ResponsysObjectDataset.
+func (rod ResponsysObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for ResponsysObjectDataset.
func (rod ResponsysObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -142370,7 +153246,7 @@ type ResponsysSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
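// The As* implementations added in the hunks below complete the BasicCopySource
// discriminated union: every concrete source type exposes one As* method per
// possible concrete type, returning (nil, false) for all but its own. A minimal
// sketch of how a caller might branch on a polymorphic source follows; the
// import path and the inspectSource helper are illustrative assumptions, while
// the interface, type, and method names come from this diff.
package main

import (
	"fmt"

	// Assumed import path for the generated models shown in this diff.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// inspectSource is a hypothetical helper that downcasts a BasicCopySource
// using the generated As* accessors instead of a raw type switch.
func inspectSource(src datafactory.BasicCopySource) {
	if rest, ok := src.AsRestSource(); ok {
		fmt.Println("REST source, pagination rules:", rest.PaginationRules)
		return
	}
	if _, ok := src.AsAzureMariaDBSource(); ok {
		fmt.Println("Azure MariaDB source")
		return
	}
	fmt.Println("some other copy source")
}

func main() {
	// Value for PaginationRules is illustrative only.
	inspectSource(datafactory.RestSource{PaginationRules: "illustrative-rule"})
}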
@@ -142494,6 +153370,11 @@ func (rs ResponsysSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for ResponsysSource.
+func (rs ResponsysSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for ResponsysSource.
func (rs ResponsysSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -142714,6 +153595,11 @@ func (rs ResponsysSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for ResponsysSource.
+func (rs ResponsysSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for ResponsysSource.
func (rs ResponsysSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -142789,6 +153675,11 @@ func (rs ResponsysSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for ResponsysSource.
+func (rs ResponsysSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for ResponsysSource.
func (rs ResponsysSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -142799,6 +153690,11 @@ func (rs ResponsysSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for ResponsysSource.
+func (rs ResponsysSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for ResponsysSource.
func (rs ResponsysSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -142901,7 +153797,7 @@ type RestResourceDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -143037,6 +153933,11 @@ func (rrd RestResourceDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for RestResourceDataset.
+func (rrd RestResourceDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for RestResourceDataset.
func (rrd RestResourceDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -143162,6 +154063,16 @@ func (rrd RestResourceDataset) AsSapCloudForCustomerResourceDataset() (*SapCloud
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for RestResourceDataset.
+func (rrd RestResourceDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for RestResourceDataset.
+func (rrd RestResourceDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for RestResourceDataset.
func (rrd RestResourceDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -143202,6 +154113,16 @@ func (rrd RestResourceDataset) AsRelationalTableDataset() (*RelationalTableDatas
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for RestResourceDataset.
+func (rrd RestResourceDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for RestResourceDataset.
+func (rrd RestResourceDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for RestResourceDataset.
func (rrd RestResourceDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -143317,6 +154238,11 @@ func (rrd RestResourceDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for RestResourceDataset.
+func (rrd RestResourceDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for RestResourceDataset.
func (rrd RestResourceDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -143327,6 +154253,11 @@ func (rrd RestResourceDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for RestResourceDataset.
+func (rrd RestResourceDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for RestResourceDataset.
func (rrd RestResourceDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -143478,7 +154409,7 @@ type RestServiceLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -143630,6 +154561,11 @@ func (rsls RestServiceLinkedService) AsMarketoLinkedService() (*MarketoLinkedSer
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for RestServiceLinkedService.
+func (rsls RestServiceLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for RestServiceLinkedService.
func (rsls RestServiceLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -144180,6 +155116,14 @@ func (rslstp *RestServiceLinkedServiceTypeProperties) UnmarshalJSON(body []byte)
// RestSource a copy activity Rest service source.
type RestSource struct {
+ // RequestMethod - The HTTP method used to call the RESTful API. The default is GET. Type: string (or Expression with resultType string).
+ RequestMethod interface{} `json:"requestMethod,omitempty"`
+ // RequestBody - The HTTP request body to the RESTful API if requestMethod is POST. Type: string (or Expression with resultType string).
+ RequestBody interface{} `json:"requestBody,omitempty"`
+ // AdditionalHeaders - The additional HTTP headers in the request to the RESTful API. Type: string (or Expression with resultType string).
+ AdditionalHeaders interface{} `json:"additionalHeaders,omitempty"`
+ // PaginationRules - The pagination rules to compose next page requests. Type: string (or Expression with resultType string).
+ PaginationRules interface{} `json:"paginationRules,omitempty"`
// HTTPRequestTimeout - The timeout (TimeSpan) to get an HTTP response. It is the timeout to get a response, not the timeout to read response data. Default value: 00:01:40. Type: string (or Expression with resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
HTTPRequestTimeout interface{} `json:"httpRequestTimeout,omitempty"`
// RequestInterval - The time to await before sending next page request.
	// RequestInterval - The time to wait before sending the next page request.
@@ -144192,7 +155136,7 @@ type RestSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
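// A minimal sketch of how the new request/pagination fields on RestSource might
// be populated and serialized; MarshalJSON (extended in the next hunk) stamps
// the "type" discriminator as "RestSource". The import path is an assumption,
// and the header/pagination values are illustrative; the field and type names
// come from this diff.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	src := datafactory.RestSource{
		RequestMethod:      "POST",                 // defaults to GET when omitted
		RequestBody:        `{"page": 1}`,          // only meaningful when requestMethod is POST
		AdditionalHeaders:  "x-ms-custom: example", // illustrative extra header
		PaginationRules:    "illustrative-rule",    // rule to compose next-page requests
		HTTPRequestTimeout: "00:01:40",             // TimeSpan pattern from the field docs
	}

	payload, err := json.Marshal(src) // invokes RestSource.MarshalJSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload)) // includes "type":"RestSource"
}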
@@ -144200,6 +155144,18 @@ type RestSource struct {
func (rs RestSource) MarshalJSON() ([]byte, error) {
rs.Type = TypeRestSource
objectMap := make(map[string]interface{})
+ if rs.RequestMethod != nil {
+ objectMap["requestMethod"] = rs.RequestMethod
+ }
+ if rs.RequestBody != nil {
+ objectMap["requestBody"] = rs.RequestBody
+ }
+ if rs.AdditionalHeaders != nil {
+ objectMap["additionalHeaders"] = rs.AdditionalHeaders
+ }
+ if rs.PaginationRules != nil {
+ objectMap["paginationRules"] = rs.PaginationRules
+ }
if rs.HTTPRequestTimeout != nil {
objectMap["httpRequestTimeout"] = rs.HTTPRequestTimeout
}
@@ -144319,6 +155275,11 @@ func (rs RestSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for RestSource.
+func (rs RestSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for RestSource.
func (rs RestSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -144539,6 +155500,11 @@ func (rs RestSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for RestSource.
+func (rs RestSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for RestSource.
func (rs RestSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -144614,6 +155580,11 @@ func (rs RestSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for RestSource.
+func (rs RestSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for RestSource.
func (rs RestSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -144624,6 +155595,11 @@ func (rs RestSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for RestSource.
+func (rs RestSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for RestSource.
func (rs RestSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -144643,6 +155619,42 @@ func (rs *RestSource) UnmarshalJSON(body []byte) error {
}
for k, v := range m {
switch k {
+ case "requestMethod":
+ if v != nil {
+ var requestMethod interface{}
+ err = json.Unmarshal(*v, &requestMethod)
+ if err != nil {
+ return err
+ }
+ rs.RequestMethod = requestMethod
+ }
+ case "requestBody":
+ if v != nil {
+ var requestBody interface{}
+ err = json.Unmarshal(*v, &requestBody)
+ if err != nil {
+ return err
+ }
+ rs.RequestBody = requestBody
+ }
+ case "additionalHeaders":
+ if v != nil {
+ var additionalHeaders interface{}
+ err = json.Unmarshal(*v, &additionalHeaders)
+ if err != nil {
+ return err
+ }
+ rs.AdditionalHeaders = additionalHeaders
+ }
+ case "paginationRules":
+ if v != nil {
+ var paginationRules interface{}
+ err = json.Unmarshal(*v, &paginationRules)
+ if err != nil {
+ return err
+ }
+ rs.PaginationRules = paginationRules
+ }
case "httpRequestTimeout":
if v != nil {
var HTTPRequestTimeout interface{}
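// The reverse direction, sketched under the same assumptions: the switch cases
// added above let RestSource.UnmarshalJSON restore the new request and
// pagination fields from a JSON activity source. The import path is assumed;
// the JSON keys match the json tags in this diff, and the values are
// illustrative.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	raw := []byte(`{
		"type": "RestSource",
		"requestMethod": "GET",
		"additionalHeaders": "x-ms-custom: example",
		"paginationRules": "illustrative-rule",
		"httpRequestTimeout": "00:01:40"
	}`)

	var src datafactory.RestSource
	if err := json.Unmarshal(raw, &src); err != nil { // invokes RestSource.UnmarshalJSON
		panic(err)
	}
	fmt.Println(src.RequestMethod, src.HTTPRequestTimeout) // GET 00:01:40
}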
@@ -144769,7 +155781,7 @@ type SalesforceLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -144921,6 +155933,11 @@ func (sls SalesforceLinkedService) AsMarketoLinkedService() (*MarketoLinkedServi
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SalesforceLinkedService.
+func (sls SalesforceLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SalesforceLinkedService.
func (sls SalesforceLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -145428,7 +156445,7 @@ type SalesforceMarketingCloudLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -145580,6 +156597,11 @@ func (smcls SalesforceMarketingCloudLinkedService) AsMarketoLinkedService() (*Ma
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SalesforceMarketingCloudLinkedService.
+func (smcls SalesforceMarketingCloudLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SalesforceMarketingCloudLinkedService.
func (smcls SalesforceMarketingCloudLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -146106,7 +157128,7 @@ type SalesforceMarketingCloudObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -146242,6 +157264,11 @@ func (smcod SalesforceMarketingCloudObjectDataset) AsMarketoObjectDataset() (*Ma
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
+func (smcod SalesforceMarketingCloudObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
func (smcod SalesforceMarketingCloudObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -146367,6 +157394,16 @@ func (smcod SalesforceMarketingCloudObjectDataset) AsSapCloudForCustomerResource
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
+func (smcod SalesforceMarketingCloudObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
+func (smcod SalesforceMarketingCloudObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
func (smcod SalesforceMarketingCloudObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -146407,6 +157444,16 @@ func (smcod SalesforceMarketingCloudObjectDataset) AsRelationalTableDataset() (*
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
+func (smcod SalesforceMarketingCloudObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
+func (smcod SalesforceMarketingCloudObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
func (smcod SalesforceMarketingCloudObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -146522,6 +157569,11 @@ func (smcod SalesforceMarketingCloudObjectDataset) AsBinaryDataset() (*BinaryDat
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
+func (smcod SalesforceMarketingCloudObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
func (smcod SalesforceMarketingCloudObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -146532,6 +157584,11 @@ func (smcod SalesforceMarketingCloudObjectDataset) AsParquetDataset() (*ParquetD
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
+func (smcod SalesforceMarketingCloudObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for SalesforceMarketingCloudObjectDataset.
func (smcod SalesforceMarketingCloudObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -146667,7 +157724,7 @@ type SalesforceMarketingCloudSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -146791,6 +157848,11 @@ func (smcs SalesforceMarketingCloudSource) AsMarketoSource() (*MarketoSource, bo
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SalesforceMarketingCloudSource.
+func (smcs SalesforceMarketingCloudSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SalesforceMarketingCloudSource.
func (smcs SalesforceMarketingCloudSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -147011,6 +158073,11 @@ func (smcs SalesforceMarketingCloudSource) AsODataSource() (*ODataSource, bool)
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SalesforceMarketingCloudSource.
+func (smcs SalesforceMarketingCloudSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SalesforceMarketingCloudSource.
func (smcs SalesforceMarketingCloudSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -147086,6 +158153,11 @@ func (smcs SalesforceMarketingCloudSource) AsBinarySource() (*BinarySource, bool
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SalesforceMarketingCloudSource.
+func (smcs SalesforceMarketingCloudSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SalesforceMarketingCloudSource.
func (smcs SalesforceMarketingCloudSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -147096,6 +158168,11 @@ func (smcs SalesforceMarketingCloudSource) AsParquetSource() (*ParquetSource, bo
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SalesforceMarketingCloudSource.
+func (smcs SalesforceMarketingCloudSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SalesforceMarketingCloudSource.
func (smcs SalesforceMarketingCloudSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -147198,7 +158275,7 @@ type SalesforceObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -147334,6 +158411,11 @@ func (sod SalesforceObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDatas
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SalesforceObjectDataset.
+func (sod SalesforceObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for SalesforceObjectDataset.
func (sod SalesforceObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -147459,6 +158541,16 @@ func (sod SalesforceObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapC
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for SalesforceObjectDataset.
+func (sod SalesforceObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SalesforceObjectDataset.
+func (sod SalesforceObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SalesforceObjectDataset.
func (sod SalesforceObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -147499,6 +158591,16 @@ func (sod SalesforceObjectDataset) AsRelationalTableDataset() (*RelationalTableD
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for SalesforceObjectDataset.
+func (sod SalesforceObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SalesforceObjectDataset.
+func (sod SalesforceObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for SalesforceObjectDataset.
func (sod SalesforceObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -147614,6 +158716,11 @@ func (sod SalesforceObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for SalesforceObjectDataset.
+func (sod SalesforceObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for SalesforceObjectDataset.
func (sod SalesforceObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -147624,6 +158731,11 @@ func (sod SalesforceObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for SalesforceObjectDataset.
+func (sod SalesforceObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for SalesforceObjectDataset.
func (sod SalesforceObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -147767,7 +158879,7 @@ type SalesforceServiceCloudLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -147919,6 +159031,11 @@ func (sscls SalesforceServiceCloudLinkedService) AsMarketoLinkedService() (*Mark
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SalesforceServiceCloudLinkedService.
+func (sscls SalesforceServiceCloudLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SalesforceServiceCloudLinkedService.
func (sscls SalesforceServiceCloudLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -148443,7 +159560,7 @@ type SalesforceServiceCloudObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -148579,6 +159696,11 @@ func (sscod SalesforceServiceCloudObjectDataset) AsMarketoObjectDataset() (*Mark
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
+func (sscod SalesforceServiceCloudObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
func (sscod SalesforceServiceCloudObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -148704,6 +159826,16 @@ func (sscod SalesforceServiceCloudObjectDataset) AsSapCloudForCustomerResourceDa
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
+func (sscod SalesforceServiceCloudObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
+func (sscod SalesforceServiceCloudObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
func (sscod SalesforceServiceCloudObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return &sscod, true
@@ -148744,6 +159876,16 @@ func (sscod SalesforceServiceCloudObjectDataset) AsRelationalTableDataset() (*Re
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
+func (sscod SalesforceServiceCloudObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
+func (sscod SalesforceServiceCloudObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
func (sscod SalesforceServiceCloudObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -148859,6 +160001,11 @@ func (sscod SalesforceServiceCloudObjectDataset) AsBinaryDataset() (*BinaryDatas
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
+func (sscod SalesforceServiceCloudObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
func (sscod SalesforceServiceCloudObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -148869,6 +160016,11 @@ func (sscod SalesforceServiceCloudObjectDataset) AsParquetDataset() (*ParquetDat
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
+func (sscod SalesforceServiceCloudObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for SalesforceServiceCloudObjectDataset.
func (sscod SalesforceServiceCloudObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -149018,7 +160170,7 @@ type SalesforceServiceCloudSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -149179,6 +160331,11 @@ func (sscs SalesforceServiceCloudSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for SalesforceServiceCloudSink.
+func (sscs SalesforceServiceCloudSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for SalesforceServiceCloudSink.
func (sscs SalesforceServiceCloudSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -149194,11 +160351,21 @@ func (sscs SalesforceServiceCloudSink) AsSapCloudForCustomerSink() (*SapCloudFor
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for SalesforceServiceCloudSink.
+func (sscs SalesforceServiceCloudSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for SalesforceServiceCloudSink.
func (sscs SalesforceServiceCloudSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for SalesforceServiceCloudSink.
+func (sscs SalesforceServiceCloudSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for SalesforceServiceCloudSink.
func (sscs SalesforceServiceCloudSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -149336,7 +160503,7 @@ type SalesforceServiceCloudSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -149463,6 +160630,11 @@ func (sscs SalesforceServiceCloudSource) AsMarketoSource() (*MarketoSource, bool
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SalesforceServiceCloudSource.
+func (sscs SalesforceServiceCloudSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SalesforceServiceCloudSource.
func (sscs SalesforceServiceCloudSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -149683,6 +160855,11 @@ func (sscs SalesforceServiceCloudSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SalesforceServiceCloudSource.
+func (sscs SalesforceServiceCloudSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SalesforceServiceCloudSource.
func (sscs SalesforceServiceCloudSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -149758,6 +160935,11 @@ func (sscs SalesforceServiceCloudSource) AsBinarySource() (*BinarySource, bool)
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SalesforceServiceCloudSource.
+func (sscs SalesforceServiceCloudSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SalesforceServiceCloudSource.
func (sscs SalesforceServiceCloudSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -149768,6 +160950,11 @@ func (sscs SalesforceServiceCloudSource) AsParquetSource() (*ParquetSource, bool
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SalesforceServiceCloudSource.
+func (sscs SalesforceServiceCloudSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SalesforceServiceCloudSource.
func (sscs SalesforceServiceCloudSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -149879,7 +161066,7 @@ type SalesforceSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -150040,6 +161227,11 @@ func (ss SalesforceSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for SalesforceSink.
+func (ss SalesforceSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for SalesforceSink.
func (ss SalesforceSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -150055,11 +161247,21 @@ func (ss SalesforceSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink,
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for SalesforceSink.
+func (ss SalesforceSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for SalesforceSink.
func (ss SalesforceSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for SalesforceSink.
+func (ss SalesforceSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for SalesforceSink.
func (ss SalesforceSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -150197,7 +161399,7 @@ type SalesforceSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -150324,6 +161526,11 @@ func (ss SalesforceSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SalesforceSource.
+func (ss SalesforceSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SalesforceSource.
func (ss SalesforceSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -150544,6 +161751,11 @@ func (ss SalesforceSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SalesforceSource.
+func (ss SalesforceSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SalesforceSource.
func (ss SalesforceSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -150619,6 +161831,11 @@ func (ss SalesforceSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SalesforceSource.
+func (ss SalesforceSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SalesforceSource.
func (ss SalesforceSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -150629,6 +161846,11 @@ func (ss SalesforceSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SalesforceSource.
+func (ss SalesforceSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SalesforceSource.
func (ss SalesforceSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -150720,6 +161942,596 @@ func (ss *SalesforceSource) UnmarshalJSON(body []byte) error {
return nil
}
+// SapBwCubeDataset the SAP BW cube dataset.
+type SapBwCubeDataset struct {
+	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Description - Dataset description.
+ Description *string `json:"description,omitempty"`
+ // Structure - Columns that define the structure of the dataset. Type: array (or Expression with resultType array), itemType: DatasetDataElement.
+ Structure interface{} `json:"structure,omitempty"`
+ // Schema - Columns that define the physical type schema of the dataset. Type: array (or Expression with resultType array), itemType: DatasetSchemaDataElement.
+ Schema interface{} `json:"schema,omitempty"`
+ // LinkedServiceName - Linked service reference.
+ LinkedServiceName *LinkedServiceReference `json:"linkedServiceName,omitempty"`
+ // Parameters - Parameters for dataset.
+ Parameters map[string]*ParameterSpecification `json:"parameters"`
+ // Annotations - List of tags that can be used for describing the Dataset.
+ Annotations *[]interface{} `json:"annotations,omitempty"`
+ // Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
+ Folder *DatasetFolder `json:"folder,omitempty"`
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
+ Type TypeBasicDataset `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) MarshalJSON() ([]byte, error) {
+ sbcd.Type = TypeSapBwCube
+ objectMap := make(map[string]interface{})
+ if sbcd.Description != nil {
+ objectMap["description"] = sbcd.Description
+ }
+ if sbcd.Structure != nil {
+ objectMap["structure"] = sbcd.Structure
+ }
+ if sbcd.Schema != nil {
+ objectMap["schema"] = sbcd.Schema
+ }
+ if sbcd.LinkedServiceName != nil {
+ objectMap["linkedServiceName"] = sbcd.LinkedServiceName
+ }
+ if sbcd.Parameters != nil {
+ objectMap["parameters"] = sbcd.Parameters
+ }
+ if sbcd.Annotations != nil {
+ objectMap["annotations"] = sbcd.Annotations
+ }
+ if sbcd.Folder != nil {
+ objectMap["folder"] = sbcd.Folder
+ }
+ if sbcd.Type != "" {
+ objectMap["type"] = sbcd.Type
+ }
+ for k, v := range sbcd.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsGoogleAdWordsObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsGoogleAdWordsObjectDataset() (*GoogleAdWordsObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzureDataExplorerTableDataset() (*AzureDataExplorerTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsOracleServiceCloudObjectDataset() (*OracleServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXResourceDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsDynamicsAXResourceDataset() (*DynamicsAXResourceDataset, bool) {
+ return nil, false
+}
+
+// AsResponsysObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsResponsysObjectDataset() (*ResponsysObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSalesforceMarketingCloudObjectDataset() (*SalesforceMarketingCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsVerticaTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsVerticaTableDataset() (*VerticaTableDataset, bool) {
+ return nil, false
+}
+
+// AsNetezzaTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsNetezzaTableDataset() (*NetezzaTableDataset, bool) {
+ return nil, false
+}
+
+// AsZohoObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsZohoObjectDataset() (*ZohoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsXeroObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsXeroObjectDataset() (*XeroObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSquareObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSquareObjectDataset() (*SquareObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSparkObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSparkObjectDataset() (*SparkObjectDataset, bool) {
+ return nil, false
+}
+
+// AsShopifyObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsShopifyObjectDataset() (*ShopifyObjectDataset, bool) {
+ return nil, false
+}
+
+// AsServiceNowObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsServiceNowObjectDataset() (*ServiceNowObjectDataset, bool) {
+ return nil, false
+}
+
+// AsQuickBooksObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsQuickBooksObjectDataset() (*QuickBooksObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPrestoObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsPrestoObjectDataset() (*PrestoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPhoenixObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsPhoenixObjectDataset() (*PhoenixObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPaypalObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsPaypalObjectDataset() (*PaypalObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMarketoObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMariaDBTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMagentoObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsMagentoObjectDataset() (*MagentoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsJiraObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsJiraObjectDataset() (*JiraObjectDataset, bool) {
+ return nil, false
+}
+
+// AsImpalaObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsImpalaObjectDataset() (*ImpalaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHubspotObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsHubspotObjectDataset() (*HubspotObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHiveObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsHiveObjectDataset() (*HiveObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHBaseObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsHBaseObjectDataset() (*HBaseObjectDataset, bool) {
+ return nil, false
+}
+
+// AsGreenplumTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsGreenplumTableDataset() (*GreenplumTableDataset, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQueryObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsGoogleBigQueryObjectDataset() (*GoogleBigQueryObjectDataset, bool) {
+ return nil, false
+}
+
+// AsEloquaObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsEloquaObjectDataset() (*EloquaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDrillTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsDrillTableDataset() (*DrillTableDataset, bool) {
+ return nil, false
+}
+
+// AsCouchbaseTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsCouchbaseTableDataset() (*CouchbaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsConcurObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsConcurObjectDataset() (*ConcurObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzurePostgreSQLTableDataset() (*AzurePostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAmazonMWSObjectDataset() (*AmazonMWSObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHTTPDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsHTTPDataset() (*HTTPDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSearchIndexDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzureSearchIndexDataset() (*AzureSearchIndexDataset, bool) {
+ return nil, false
+}
+
+// AsWebTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsWebTableDataset() (*WebTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapTableResourceDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSapTableResourceDataset() (*SapTableResourceDataset, bool) {
+ return nil, false
+}
+
+// AsRestResourceDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsRestResourceDataset() (*RestResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSQLServerTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSQLServerTableDataset() (*SQLServerTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSapOpenHubTableDataset() (*SapOpenHubTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapHanaTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSapHanaTableDataset() (*SapHanaTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapEccResourceDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSapEccResourceDataset() (*SapEccResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerResourceDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCustomerResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapBwCubeDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return &sbcd, true
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceObjectDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsSalesforceObjectDataset() (*SalesforceObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsMicrosoftAccessTableDataset() (*MicrosoftAccessTableDataset, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsPostgreSQLTableDataset() (*PostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsMySQLTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsMySQLTableDataset() (*MySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsOdbcTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsOdbcTableDataset() (*OdbcTableDataset, bool) {
+ return nil, false
+}
+
+// AsInformixTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsInformixTableDataset() (*InformixTableDataset, bool) {
+ return nil, false
+}
+
+// AsRelationalTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsRelationalTableDataset() (*RelationalTableDataset, bool) {
+ return nil, false
+}
+
+// AsDb2TableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsTeradataTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsTeradataTableDataset() (*TeradataTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsOracleTableDataset() (*OracleTableDataset, bool) {
+ return nil, false
+}
+
+// AsODataResourceDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsODataResourceDataset() (*ODataResourceDataset, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPICollectionDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsCosmosDbMongoDbAPICollectionDataset() (*CosmosDbMongoDbAPICollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2CollectionDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsMongoDbV2CollectionDataset() (*MongoDbV2CollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbCollectionDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsMongoDbCollectionDataset() (*MongoDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsFileShareDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsFileShareDataset() (*FileShareDataset, bool) {
+ return nil, false
+}
+
+// AsOffice365Dataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsOffice365Dataset() (*Office365Dataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzureBlobFSDataset() (*AzureBlobFSDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzureDataLakeStoreDataset() (*AzureDataLakeStoreDataset, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsEntityDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsCommonDataServiceForAppsEntityDataset() (*CommonDataServiceForAppsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmEntityDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsDynamicsCrmEntityDataset() (*DynamicsCrmEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsEntityDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsDynamicsEntityDataset() (*DynamicsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsDocumentDbCollectionDataset() (*DocumentDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsCustomDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsCustomDataset() (*CustomDataset, bool) {
+ return nil, false
+}
+
+// AsCassandraTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsCassandraTableDataset() (*CassandraTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLDWTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzureSQLDWTableDataset() (*AzureSQLDWTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLMITableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzureSQLMITableDataset() (*AzureSQLMITableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzureSQLTableDataset() (*AzureSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureTableDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzureTableDataset() (*AzureTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAzureBlobDataset() (*AzureBlobDataset, bool) {
+ return nil, false
+}
+
+// AsBinaryDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsBinaryDataset() (*BinaryDataset, bool) {
+ return nil, false
+}
+
+// AsJSONDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
+// AsDelimitedTextDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
+ return nil, false
+}
+
+// AsParquetDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsParquetDataset() (*ParquetDataset, bool) {
+ return nil, false
+}
+
+// AsAvroDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonS3Dataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
+ return nil, false
+}
+
+// AsDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsDataset() (*Dataset, bool) {
+ return nil, false
+}
+
+// AsBasicDataset is the BasicDataset implementation for SapBwCubeDataset.
+func (sbcd SapBwCubeDataset) AsBasicDataset() (BasicDataset, bool) {
+ return &sbcd, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for SapBwCubeDataset struct.
+func (sbcd *SapBwCubeDataset) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if sbcd.AdditionalProperties == nil {
+ sbcd.AdditionalProperties = make(map[string]interface{})
+ }
+ sbcd.AdditionalProperties[k] = additionalProperties
+ }
+ case "description":
+ if v != nil {
+ var description string
+ err = json.Unmarshal(*v, &description)
+ if err != nil {
+ return err
+ }
+ sbcd.Description = &description
+ }
+ case "structure":
+ if v != nil {
+ var structure interface{}
+ err = json.Unmarshal(*v, &structure)
+ if err != nil {
+ return err
+ }
+ sbcd.Structure = structure
+ }
+ case "schema":
+ if v != nil {
+ var schema interface{}
+ err = json.Unmarshal(*v, &schema)
+ if err != nil {
+ return err
+ }
+ sbcd.Schema = schema
+ }
+ case "linkedServiceName":
+ if v != nil {
+ var linkedServiceName LinkedServiceReference
+ err = json.Unmarshal(*v, &linkedServiceName)
+ if err != nil {
+ return err
+ }
+ sbcd.LinkedServiceName = &linkedServiceName
+ }
+ case "parameters":
+ if v != nil {
+ var parameters map[string]*ParameterSpecification
+	err = json.Unmarshal(*v, &parameters)
+ if err != nil {
+ return err
+ }
+ sbcd.Parameters = parameters
+ }
+ case "annotations":
+ if v != nil {
+ var annotations []interface{}
+ err = json.Unmarshal(*v, &annotations)
+ if err != nil {
+ return err
+ }
+ sbcd.Annotations = &annotations
+ }
+ case "folder":
+ if v != nil {
+ var folder DatasetFolder
+ err = json.Unmarshal(*v, &folder)
+ if err != nil {
+ return err
+ }
+ sbcd.Folder = &folder
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicDataset
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sbcd.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
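The custom `MarshalJSON`/`UnmarshalJSON` pair above pins the `type` discriminator to `TypeSapBwCube`, flattens `AdditionalProperties` into the top-level JSON object, and recovers both on the way back in. A hedged round-trip sketch under the same assumed import path (the field values are illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	desc := "SAP BW cube dataset example"
	ds := datafactory.SapBwCubeDataset{
		Description: &desc,
		// Unknown keys are carried in AdditionalProperties and survive a round trip.
		AdditionalProperties: map[string]interface{}{"x-custom": "value"},
	}

	// MarshalJSON sets the discriminator to TypeSapBwCube before serializing.
	b, err := json.Marshal(ds)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	var back datafactory.SapBwCubeDataset
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Type == datafactory.TypeSapBwCube, back.AdditionalProperties["x-custom"])
}
```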
// SapBWLinkedService SAP Business Warehouse Linked Service.
type SapBWLinkedService struct {
// SapBWLinkedServiceTypeProperties - Properties specific to this linked service type.
@@ -150734,7 +162546,7 @@ type SapBWLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -150886,6 +162698,11 @@ func (sbls SapBWLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SapBWLinkedService.
+func (sbls SapBWLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SapBWLinkedService.
func (sbls SapBWLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -151391,6 +163208,549 @@ func (sblstp *SapBWLinkedServiceTypeProperties) UnmarshalJSON(body []byte) error
return nil
}
+// SapBwSource a copy activity source for SapBW server via MDX.
+type SapBwSource struct {
+ // Query - MDX query. Type: string (or Expression with resultType string).
+ Query interface{} `json:"query,omitempty"`
+	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // SourceRetryCount - Source retry count. Type: integer (or Expression with resultType integer).
+ SourceRetryCount interface{} `json:"sourceRetryCount,omitempty"`
+ // SourceRetryWait - Source retry wait. Type: string (or Expression with resultType string), pattern: ((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
+ SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
+ // MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
+ MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
+ Type TypeBasicCopySource `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for SapBwSource.
+func (sbs SapBwSource) MarshalJSON() ([]byte, error) {
+ sbs.Type = TypeSapBwSource
+ objectMap := make(map[string]interface{})
+ if sbs.Query != nil {
+ objectMap["query"] = sbs.Query
+ }
+ if sbs.SourceRetryCount != nil {
+ objectMap["sourceRetryCount"] = sbs.SourceRetryCount
+ }
+ if sbs.SourceRetryWait != nil {
+ objectMap["sourceRetryWait"] = sbs.SourceRetryWait
+ }
+ if sbs.MaxConcurrentConnections != nil {
+ objectMap["maxConcurrentConnections"] = sbs.MaxConcurrentConnections
+ }
+ if sbs.Type != "" {
+ objectMap["type"] = sbs.Type
+ }
+ for k, v := range sbs.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
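`SapBwSource` carries the MDX query as an untyped `interface{}` so it can hold either a literal string or a pipeline Expression object, and its `MarshalJSON` stamps the `TypeSapBwSource` discriminator. A brief hedged sketch of serializing such a source (query text and values are illustrative, same assumed import path as above):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	src := datafactory.SapBwSource{
		// A literal MDX query; an Expression object could be supplied instead.
		Query:                    "SELECT [Measures].MEMBERS ON COLUMNS FROM [MyCube]",
		MaxConcurrentConnections: 4,
	}

	// MarshalJSON sets the type discriminator to TypeSapBwSource before serializing.
	b, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```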
+// AsAmazonRedshiftSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsAmazonRedshiftSource() (*AmazonRedshiftSource, bool) {
+ return nil, false
+}
+
+// AsGoogleAdWordsSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsGoogleAdWordsSource() (*GoogleAdWordsSource, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsOracleServiceCloudSource() (*OracleServiceCloudSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsDynamicsAXSource() (*DynamicsAXSource, bool) {
+ return nil, false
+}
+
+// AsResponsysSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsResponsysSource() (*ResponsysSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSalesforceMarketingCloudSource() (*SalesforceMarketingCloudSource, bool) {
+ return nil, false
+}
+
+// AsVerticaSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsVerticaSource() (*VerticaSource, bool) {
+ return nil, false
+}
+
+// AsNetezzaSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsNetezzaSource() (*NetezzaSource, bool) {
+ return nil, false
+}
+
+// AsZohoSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsZohoSource() (*ZohoSource, bool) {
+ return nil, false
+}
+
+// AsXeroSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsXeroSource() (*XeroSource, bool) {
+ return nil, false
+}
+
+// AsSquareSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSquareSource() (*SquareSource, bool) {
+ return nil, false
+}
+
+// AsSparkSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSparkSource() (*SparkSource, bool) {
+ return nil, false
+}
+
+// AsShopifySource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsShopifySource() (*ShopifySource, bool) {
+ return nil, false
+}
+
+// AsServiceNowSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsServiceNowSource() (*ServiceNowSource, bool) {
+ return nil, false
+}
+
+// AsQuickBooksSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsQuickBooksSource() (*QuickBooksSource, bool) {
+ return nil, false
+}
+
+// AsPrestoSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsPrestoSource() (*PrestoSource, bool) {
+ return nil, false
+}
+
+// AsPhoenixSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsPhoenixSource() (*PhoenixSource, bool) {
+ return nil, false
+}
+
+// AsPaypalSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsPaypalSource() (*PaypalSource, bool) {
+ return nil, false
+}
+
+// AsMarketoSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsMarketoSource() (*MarketoSource, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
+// AsMariaDBSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsMariaDBSource() (*MariaDBSource, bool) {
+ return nil, false
+}
+
+// AsMagentoSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsMagentoSource() (*MagentoSource, bool) {
+ return nil, false
+}
+
+// AsJiraSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsJiraSource() (*JiraSource, bool) {
+ return nil, false
+}
+
+// AsImpalaSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsImpalaSource() (*ImpalaSource, bool) {
+ return nil, false
+}
+
+// AsHubspotSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsHubspotSource() (*HubspotSource, bool) {
+ return nil, false
+}
+
+// AsHiveSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsHiveSource() (*HiveSource, bool) {
+ return nil, false
+}
+
+// AsHBaseSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsHBaseSource() (*HBaseSource, bool) {
+ return nil, false
+}
+
+// AsGreenplumSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsGreenplumSource() (*GreenplumSource, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQuerySource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsGoogleBigQuerySource() (*GoogleBigQuerySource, bool) {
+ return nil, false
+}
+
+// AsEloquaSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsEloquaSource() (*EloquaSource, bool) {
+ return nil, false
+}
+
+// AsDrillSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsDrillSource() (*DrillSource, bool) {
+ return nil, false
+}
+
+// AsCouchbaseSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsCouchbaseSource() (*CouchbaseSource, bool) {
+ return nil, false
+}
+
+// AsConcurSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsConcurSource() (*ConcurSource, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsAzurePostgreSQLSource() (*AzurePostgreSQLSource, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsAmazonMWSSource() (*AmazonMWSSource, bool) {
+ return nil, false
+}
+
+// AsHTTPSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsHTTPSource() (*HTTPSource, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsAzureBlobFSSource() (*AzureBlobFSSource, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsAzureDataLakeStoreSource() (*AzureDataLakeStoreSource, bool) {
+ return nil, false
+}
+
+// AsOffice365Source is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsOffice365Source() (*Office365Source, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPISource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsCosmosDbMongoDbAPISource() (*CosmosDbMongoDbAPISource, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2Source is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsMongoDbV2Source() (*MongoDbV2Source, bool) {
+ return nil, false
+}
+
+// AsMongoDbSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsMongoDbSource() (*MongoDbSource, bool) {
+ return nil, false
+}
+
+// AsCassandraSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsCassandraSource() (*CassandraSource, bool) {
+ return nil, false
+}
+
+// AsWebSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsWebSource() (*WebSource, bool) {
+ return nil, false
+}
+
+// AsTeradataSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsTeradataSource() (*TeradataSource, bool) {
+ return nil, false
+}
+
+// AsOracleSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsOracleSource() (*OracleSource, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsAzureDataExplorerSource() (*AzureDataExplorerSource, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsAzureMySQLSource() (*AzureMySQLSource, bool) {
+ return nil, false
+}
+
+// AsHdfsSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsHdfsSource() (*HdfsSource, bool) {
+ return nil, false
+}
+
+// AsFileSystemSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsFileSystemSource() (*FileSystemSource, bool) {
+ return nil, false
+}
+
+// AsSQLDWSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSQLDWSource() (*SQLDWSource, bool) {
+ return nil, false
+}
+
+// AsSQLMISource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSQLMISource() (*SQLMISource, bool) {
+ return nil, false
+}
+
+// AsAzureSQLSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsAzureSQLSource() (*AzureSQLSource, bool) {
+ return nil, false
+}
+
+// AsSQLServerSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSQLServerSource() (*SQLServerSource, bool) {
+ return nil, false
+}
+
+// AsSQLSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSQLSource() (*SQLSource, bool) {
+ return nil, false
+}
+
+// AsRestSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsRestSource() (*RestSource, bool) {
+ return nil, false
+}
+
+// AsSapTableSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSapTableSource() (*SapTableSource, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSapOpenHubSource() (*SapOpenHubSource, bool) {
+ return nil, false
+}
+
+// AsSapHanaSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSapHanaSource() (*SapHanaSource, bool) {
+ return nil, false
+}
+
+// AsSapEccSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSapEccSource() (*SapEccSource, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSapCloudForCustomerSource() (*SapCloudForCustomerSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceServiceCloudSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSalesforceServiceCloudSource() (*SalesforceServiceCloudSource, bool) {
+ return nil, false
+}
+
+// AsSalesforceSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSalesforceSource() (*SalesforceSource, bool) {
+ return nil, false
+}
+
+// AsODataSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsODataSource() (*ODataSource, bool) {
+ return nil, false
+}
+
+// AsSapBwSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSapBwSource() (*SapBwSource, bool) {
+ return &sbs, true
+}
+
+// AsSybaseSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsSybaseSource() (*SybaseSource, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsPostgreSQLSource() (*PostgreSQLSource, bool) {
+ return nil, false
+}
+
+// AsMySQLSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsMySQLSource() (*MySQLSource, bool) {
+ return nil, false
+}
+
+// AsOdbcSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsOdbcSource() (*OdbcSource, bool) {
+ return nil, false
+}
+
+// AsDb2Source is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsDb2Source() (*Db2Source, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsMicrosoftAccessSource() (*MicrosoftAccessSource, bool) {
+ return nil, false
+}
+
+// AsInformixSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsInformixSource() (*InformixSource, bool) {
+ return nil, false
+}
+
+// AsRelationalSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsRelationalSource() (*RelationalSource, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsCommonDataServiceForAppsSource() (*CommonDataServiceForAppsSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsDynamicsCrmSource() (*DynamicsCrmSource, bool) {
+ return nil, false
+}
+
+// AsDynamicsSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsDynamicsSource() (*DynamicsSource, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsDocumentDbCollectionSource() (*DocumentDbCollectionSource, bool) {
+ return nil, false
+}
+
+// AsBlobSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsBlobSource() (*BlobSource, bool) {
+ return nil, false
+}
+
+// AsAzureTableSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsAzureTableSource() (*AzureTableSource, bool) {
+ return nil, false
+}
+
+// AsBinarySource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsBinarySource() (*BinarySource, bool) {
+ return nil, false
+}
+
+// AsJSONSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
+// AsDelimitedTextSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
+ return nil, false
+}
+
+// AsParquetSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsParquetSource() (*ParquetSource, bool) {
+ return nil, false
+}
+
+// AsAvroSource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
+// AsCopySource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsCopySource() (*CopySource, bool) {
+ return nil, false
+}
+
+// AsBasicCopySource is the BasicCopySource implementation for SapBwSource.
+func (sbs SapBwSource) AsBasicCopySource() (BasicCopySource, bool) {
+ return &sbs, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for SapBwSource struct.
+func (sbs *SapBwSource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "query":
+ if v != nil {
+ var query interface{}
+ err = json.Unmarshal(*v, &query)
+ if err != nil {
+ return err
+ }
+ sbs.Query = query
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if sbs.AdditionalProperties == nil {
+ sbs.AdditionalProperties = make(map[string]interface{})
+ }
+ sbs.AdditionalProperties[k] = additionalProperties
+ }
+ case "sourceRetryCount":
+ if v != nil {
+ var sourceRetryCount interface{}
+ err = json.Unmarshal(*v, &sourceRetryCount)
+ if err != nil {
+ return err
+ }
+ sbs.SourceRetryCount = sourceRetryCount
+ }
+ case "sourceRetryWait":
+ if v != nil {
+ var sourceRetryWait interface{}
+ err = json.Unmarshal(*v, &sourceRetryWait)
+ if err != nil {
+ return err
+ }
+ sbs.SourceRetryWait = sourceRetryWait
+ }
+ case "maxConcurrentConnections":
+ if v != nil {
+ var maxConcurrentConnections interface{}
+ err = json.Unmarshal(*v, &maxConcurrentConnections)
+ if err != nil {
+ return err
+ }
+ sbs.MaxConcurrentConnections = maxConcurrentConnections
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicCopySource
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sbs.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
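
// Illustrative usage sketch (not part of the generated diff above). It shows
// how the SapBwSource discriminated union round-trips through JSON: the
// custom MarshalJSON stamps the TypeSapBwSource discriminator into the
// "type" key, and the custom UnmarshalJSON restores known fields while
// routing unrecognized keys into AdditionalProperties. The import path and
// the query text are assumptions, not taken from this diff.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// Query is an interface{} in the generated model; a plain MDX-style
	// string is used here purely for illustration.
	src := datafactory.SapBwSource{Query: "SELECT ... FROM [SomeCube]"}

	// The custom marshaler emits the query plus the "SapBwSource" type tag.
	payload, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))

	// The custom unmarshaler reads the same payload back; AsSapBwSource then
	// returns the concrete value with ok == true.
	var decoded datafactory.SapBwSource
	if err := json.Unmarshal(payload, &decoded); err != nil {
		panic(err)
	}
	if concrete, ok := decoded.AsSapBwSource(); ok {
		fmt.Println(concrete.Query)
	}
}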
+
// SapCloudForCustomerLinkedService linked service for SAP Cloud for Customer.
type SapCloudForCustomerLinkedService struct {
// SapCloudForCustomerLinkedServiceTypeProperties - SAP Cloud for Customer linked service properties.
@@ -151405,7 +163765,7 @@ type SapCloudForCustomerLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -151557,6 +163917,11 @@ func (scfcls SapCloudForCustomerLinkedService) AsMarketoLinkedService() (*Market
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SapCloudForCustomerLinkedService.
+func (scfcls SapCloudForCustomerLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SapCloudForCustomerLinkedService.
func (scfcls SapCloudForCustomerLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -152060,7 +164425,7 @@ type SapCloudForCustomerResourceDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -152196,6 +164561,11 @@ func (scfcrd SapCloudForCustomerResourceDataset) AsMarketoObjectDataset() (*Mark
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
+func (scfcrd SapCloudForCustomerResourceDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
func (scfcrd SapCloudForCustomerResourceDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -152321,6 +164691,16 @@ func (scfcrd SapCloudForCustomerResourceDataset) AsSapCloudForCustomerResourceDa
return &scfcrd, true
}
+// AsSapBwCubeDataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
+func (scfcrd SapCloudForCustomerResourceDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
+func (scfcrd SapCloudForCustomerResourceDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
func (scfcrd SapCloudForCustomerResourceDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -152361,6 +164741,16 @@ func (scfcrd SapCloudForCustomerResourceDataset) AsRelationalTableDataset() (*Re
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
+func (scfcrd SapCloudForCustomerResourceDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
+func (scfcrd SapCloudForCustomerResourceDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
func (scfcrd SapCloudForCustomerResourceDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -152476,6 +164866,11 @@ func (scfcrd SapCloudForCustomerResourceDataset) AsBinaryDataset() (*BinaryDatas
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
+func (scfcrd SapCloudForCustomerResourceDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
func (scfcrd SapCloudForCustomerResourceDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -152486,6 +164881,11 @@ func (scfcrd SapCloudForCustomerResourceDataset) AsParquetDataset() (*ParquetDat
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
+func (scfcrd SapCloudForCustomerResourceDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for SapCloudForCustomerResourceDataset.
func (scfcrd SapCloudForCustomerResourceDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -152632,7 +165032,7 @@ type SapCloudForCustomerSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -152787,6 +165187,11 @@ func (scfcs SapCloudForCustomerSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for SapCloudForCustomerSink.
+func (scfcs SapCloudForCustomerSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for SapCloudForCustomerSink.
func (scfcs SapCloudForCustomerSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -152802,11 +165207,21 @@ func (scfcs SapCloudForCustomerSink) AsSapCloudForCustomerSink() (*SapCloudForCu
return &scfcs, true
}
+// AsAzureMySQLSink is the BasicCopySink implementation for SapCloudForCustomerSink.
+func (scfcs SapCloudForCustomerSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for SapCloudForCustomerSink.
func (scfcs SapCloudForCustomerSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for SapCloudForCustomerSink.
+func (scfcs SapCloudForCustomerSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for SapCloudForCustomerSink.
func (scfcs SapCloudForCustomerSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -152924,7 +165339,7 @@ type SapCloudForCustomerSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
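
// Illustrative dispatch sketch (not part of the generated code): every
// concrete source carries the full set of As* helpers so that callers holding
// only the BasicCopySource interface can recover the concrete type without a
// type switch; each helper returns (nil, false) except the one matching the
// receiver's actual type. The import path is an assumption, and BasicCopySource
// is the interface defined elsewhere in this generated file.
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func describeSource(cs datafactory.BasicCopySource) string {
	if s, ok := cs.AsSapBwSource(); ok {
		return fmt.Sprintf("SAP BW source, query=%v", s.Query)
	}
	if _, ok := cs.AsSapCloudForCustomerSource(); ok {
		return "SAP Cloud for Customer source"
	}
	// Only an untyped CopySource answers AsCopySource; specialized sources
	// return (nil, false) from every helper but their own.
	if base, ok := cs.AsCopySource(); ok {
		return fmt.Sprintf("generic copy source, type=%s", base.Type)
	}
	return "some other specialized source"
}

func main() {
	fmt.Println(describeSource(datafactory.SapBwSource{Query: "illustrative query"}))
}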
@@ -153048,6 +165463,11 @@ func (scfcs SapCloudForCustomerSource) AsMarketoSource() (*MarketoSource, bool)
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SapCloudForCustomerSource.
+func (scfcs SapCloudForCustomerSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SapCloudForCustomerSource.
func (scfcs SapCloudForCustomerSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -153268,6 +165688,11 @@ func (scfcs SapCloudForCustomerSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SapCloudForCustomerSource.
+func (scfcs SapCloudForCustomerSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SapCloudForCustomerSource.
func (scfcs SapCloudForCustomerSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -153343,6 +165768,11 @@ func (scfcs SapCloudForCustomerSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SapCloudForCustomerSource.
+func (scfcs SapCloudForCustomerSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SapCloudForCustomerSource.
func (scfcs SapCloudForCustomerSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -153353,6 +165783,11 @@ func (scfcs SapCloudForCustomerSource) AsParquetSource() (*ParquetSource, bool)
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SapCloudForCustomerSource.
+func (scfcs SapCloudForCustomerSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SapCloudForCustomerSource.
func (scfcs SapCloudForCustomerSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -153449,7 +165884,7 @@ type SapEccLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -153601,6 +166036,11 @@ func (sels SapEccLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SapEccLinkedService.
+func (sels SapEccLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SapEccLinkedService.
func (sels SapEccLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -154104,7 +166544,7 @@ type SapEccResourceDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -154240,6 +166680,11 @@ func (serd SapEccResourceDataset) AsMarketoObjectDataset() (*MarketoObjectDatase
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SapEccResourceDataset.
+func (serd SapEccResourceDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for SapEccResourceDataset.
func (serd SapEccResourceDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -154365,6 +166810,16 @@ func (serd SapEccResourceDataset) AsSapCloudForCustomerResourceDataset() (*SapCl
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for SapEccResourceDataset.
+func (serd SapEccResourceDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SapEccResourceDataset.
+func (serd SapEccResourceDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SapEccResourceDataset.
func (serd SapEccResourceDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -154405,6 +166860,16 @@ func (serd SapEccResourceDataset) AsRelationalTableDataset() (*RelationalTableDa
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for SapEccResourceDataset.
+func (serd SapEccResourceDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SapEccResourceDataset.
+func (serd SapEccResourceDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for SapEccResourceDataset.
func (serd SapEccResourceDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -154520,6 +166985,11 @@ func (serd SapEccResourceDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for SapEccResourceDataset.
+func (serd SapEccResourceDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for SapEccResourceDataset.
func (serd SapEccResourceDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -154530,6 +167000,11 @@ func (serd SapEccResourceDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for SapEccResourceDataset.
+func (serd SapEccResourceDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for SapEccResourceDataset.
func (serd SapEccResourceDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -154671,7 +167146,7 @@ type SapEccSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -154795,6 +167270,11 @@ func (ses SapEccSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SapEccSource.
+func (ses SapEccSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SapEccSource.
func (ses SapEccSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -155015,6 +167495,11 @@ func (ses SapEccSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SapEccSource.
+func (ses SapEccSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SapEccSource.
func (ses SapEccSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -155090,6 +167575,11 @@ func (ses SapEccSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SapEccSource.
+func (ses SapEccSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SapEccSource.
func (ses SapEccSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -155100,6 +167590,11 @@ func (ses SapEccSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SapEccSource.
+func (ses SapEccSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SapEccSource.
func (ses SapEccSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -155196,7 +167691,7 @@ type SapHanaLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -155348,6 +167843,11 @@ func (shls SapHanaLinkedService) AsMarketoLinkedService() (*MarketoLinkedService
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SapHanaLinkedService.
+func (shls SapHanaLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SapHanaLinkedService.
func (shls SapHanaLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -155867,7 +168367,7 @@ type SapHanaSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -155994,6 +168494,11 @@ func (shs SapHanaSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SapHanaSource.
+func (shs SapHanaSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SapHanaSource.
func (shs SapHanaSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -156214,6 +168719,11 @@ func (shs SapHanaSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SapHanaSource.
+func (shs SapHanaSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SapHanaSource.
func (shs SapHanaSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -156289,6 +168799,11 @@ func (shs SapHanaSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SapHanaSource.
+func (shs SapHanaSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SapHanaSource.
func (shs SapHanaSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -156299,6 +168814,11 @@ func (shs SapHanaSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SapHanaSource.
+func (shs SapHanaSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SapHanaSource.
func (shs SapHanaSource) AsCopySource() (*CopySource, bool) {
return nil, false
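
The AsAzureMariaDBSource, AsSapBwSource, AsJSONSource and AsAvroSource helpers added above follow the generated discriminated-union pattern: every concrete copy source implements the full set of As* methods, returning its own value and true only for its own type and (nil, false) for all others. A minimal consumer-side sketch of that pattern, assuming the usual import path for this package and that the BasicCopySource interface declares these helpers (neither is shown in this hunk):

```go
package main

import (
	"fmt"

	// Assumed import path for the generated package these hunks belong to.
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// describeSource type-switches over a copy source using the generated As*
// helpers: each returns (value, true) for its own concrete type and
// (nil, false) for every other type.
func describeSource(src datafactory.BasicCopySource) {
	if hana, ok := src.AsSapHanaSource(); ok {
		fmt.Printf("SAP HANA source, max connections: %v\n", hana.MaxConcurrentConnections)
		return
	}
	if avro, ok := src.AsAvroSource(); ok {
		fmt.Printf("Avro source: %+v\n", avro)
		return
	}
	fmt.Println("some other copy source")
}

func main() {
	describeSource(datafactory.SapHanaSource{})
}
```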
@@ -156410,7 +168930,7 @@ type SapHanaTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -156546,6 +169066,11 @@ func (shtd SapHanaTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SapHanaTableDataset.
+func (shtd SapHanaTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for SapHanaTableDataset.
func (shtd SapHanaTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -156671,6 +169196,16 @@ func (shtd SapHanaTableDataset) AsSapCloudForCustomerResourceDataset() (*SapClou
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for SapHanaTableDataset.
+func (shtd SapHanaTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SapHanaTableDataset.
+func (shtd SapHanaTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SapHanaTableDataset.
func (shtd SapHanaTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -156711,6 +169246,16 @@ func (shtd SapHanaTableDataset) AsRelationalTableDataset() (*RelationalTableData
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for SapHanaTableDataset.
+func (shtd SapHanaTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SapHanaTableDataset.
+func (shtd SapHanaTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for SapHanaTableDataset.
func (shtd SapHanaTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -156826,6 +169371,11 @@ func (shtd SapHanaTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for SapHanaTableDataset.
+func (shtd SapHanaTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for SapHanaTableDataset.
func (shtd SapHanaTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -156836,6 +169386,11 @@ func (shtd SapHanaTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for SapHanaTableDataset.
+func (shtd SapHanaTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for SapHanaTableDataset.
func (shtd SapHanaTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -156981,7 +169536,7 @@ type SapOpenHubLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -157133,6 +169688,11 @@ func (sohls SapOpenHubLinkedService) AsMarketoLinkedService() (*MarketoLinkedSer
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SapOpenHubLinkedService.
+func (sohls SapOpenHubLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SapOpenHubLinkedService.
func (sohls SapOpenHubLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -157652,6 +170212,10 @@ func (sohlstp *SapOpenHubLinkedServiceTypeProperties) UnmarshalJSON(body []byte)
// SapOpenHubSource a copy activity source for SAP Business Warehouse Open Hub Destination source.
type SapOpenHubSource struct {
+ // ExcludeLastRequest - Whether to exclude the records of the last request. The default value is true. Type: boolean (or Expression with resultType boolean).
+ ExcludeLastRequest interface{} `json:"excludeLastRequest,omitempty"`
+ // BaseRequestID - The ID of the request for delta loading. Once it is set, only data with a requestId larger than the value of this property will be retrieved. The default value is 0. Type: integer (or Expression with resultType integer).
+ BaseRequestID interface{} `json:"baseRequestId,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// SourceRetryCount - Source retry count. Type: integer (or Expression with resultType integer).
@@ -157660,7 +170224,7 @@ type SapOpenHubSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -157668,6 +170232,12 @@ type SapOpenHubSource struct {
func (sohs SapOpenHubSource) MarshalJSON() ([]byte, error) {
sohs.Type = TypeSapOpenHubSource
objectMap := make(map[string]interface{})
+ if sohs.ExcludeLastRequest != nil {
+ objectMap["excludeLastRequest"] = sohs.ExcludeLastRequest
+ }
+ if sohs.BaseRequestID != nil {
+ objectMap["baseRequestId"] = sohs.BaseRequestID
+ }
if sohs.SourceRetryCount != nil {
objectMap["sourceRetryCount"] = sohs.SourceRetryCount
}
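
The two new optional fields, ExcludeLastRequest and BaseRequestID, control delta loading from the Open Hub destination, and the MarshalJSON hunk above emits them only when they are non-nil. A minimal sketch of building such a source and serializing it, assuming the same import path as in the earlier sketch (not shown in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// Both fields are interface{} so they can carry either a literal or a
	// Data Factory expression object; plain literals are used here.
	src := datafactory.SapOpenHubSource{
		ExcludeLastRequest: false, // include the records of the last request
		BaseRequestID:      42,    // only pull data with requestId > 42
	}

	// The custom MarshalJSON sets the TypeSapOpenHubSource discriminator and
	// writes excludeLastRequest/baseRequestId because both values are non-nil.
	body, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```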
@@ -157781,6 +170351,11 @@ func (sohs SapOpenHubSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SapOpenHubSource.
+func (sohs SapOpenHubSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SapOpenHubSource.
func (sohs SapOpenHubSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -158001,6 +170576,11 @@ func (sohs SapOpenHubSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SapOpenHubSource.
+func (sohs SapOpenHubSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SapOpenHubSource.
func (sohs SapOpenHubSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -158076,6 +170656,11 @@ func (sohs SapOpenHubSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SapOpenHubSource.
+func (sohs SapOpenHubSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SapOpenHubSource.
func (sohs SapOpenHubSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -158086,6 +170671,11 @@ func (sohs SapOpenHubSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SapOpenHubSource.
+func (sohs SapOpenHubSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SapOpenHubSource.
func (sohs SapOpenHubSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -158105,6 +170695,24 @@ func (sohs *SapOpenHubSource) UnmarshalJSON(body []byte) error {
}
for k, v := range m {
switch k {
+ case "excludeLastRequest":
+ if v != nil {
+ var excludeLastRequest interface{}
+ err = json.Unmarshal(*v, &excludeLastRequest)
+ if err != nil {
+ return err
+ }
+ sohs.ExcludeLastRequest = excludeLastRequest
+ }
+ case "baseRequestId":
+ if v != nil {
+ var baseRequestID interface{}
+ err = json.Unmarshal(*v, &baseRequestID)
+ if err != nil {
+ return err
+ }
+ sohs.BaseRequestID = baseRequestID
+ }
default:
if v != nil {
var additionalProperties interface{}
@@ -158179,7 +170787,7 @@ type SapOpenHubTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -158315,6 +170923,11 @@ func (sohtd SapOpenHubTableDataset) AsMarketoObjectDataset() (*MarketoObjectData
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SapOpenHubTableDataset.
+func (sohtd SapOpenHubTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for SapOpenHubTableDataset.
func (sohtd SapOpenHubTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -158440,6 +171053,16 @@ func (sohtd SapOpenHubTableDataset) AsSapCloudForCustomerResourceDataset() (*Sap
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for SapOpenHubTableDataset.
+func (sohtd SapOpenHubTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SapOpenHubTableDataset.
+func (sohtd SapOpenHubTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SapOpenHubTableDataset.
func (sohtd SapOpenHubTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -158480,6 +171103,16 @@ func (sohtd SapOpenHubTableDataset) AsRelationalTableDataset() (*RelationalTable
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for SapOpenHubTableDataset.
+func (sohtd SapOpenHubTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SapOpenHubTableDataset.
+func (sohtd SapOpenHubTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for SapOpenHubTableDataset.
func (sohtd SapOpenHubTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -158595,6 +171228,11 @@ func (sohtd SapOpenHubTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for SapOpenHubTableDataset.
+func (sohtd SapOpenHubTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for SapOpenHubTableDataset.
func (sohtd SapOpenHubTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -158605,6 +171243,11 @@ func (sohtd SapOpenHubTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for SapOpenHubTableDataset.
+func (sohtd SapOpenHubTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for SapOpenHubTableDataset.
func (sohtd SapOpenHubTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -158752,7 +171395,7 @@ type SapTableLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -158904,6 +171547,11 @@ func (stls SapTableLinkedService) AsMarketoLinkedService() (*MarketoLinkedServic
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SapTableLinkedService.
+func (stls SapTableLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SapTableLinkedService.
func (stls SapTableLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -159551,7 +172199,7 @@ type SapTableResourceDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -159687,6 +172335,11 @@ func (strd SapTableResourceDataset) AsMarketoObjectDataset() (*MarketoObjectData
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SapTableResourceDataset.
+func (strd SapTableResourceDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for SapTableResourceDataset.
func (strd SapTableResourceDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -159812,6 +172465,16 @@ func (strd SapTableResourceDataset) AsSapCloudForCustomerResourceDataset() (*Sap
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for SapTableResourceDataset.
+func (strd SapTableResourceDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SapTableResourceDataset.
+func (strd SapTableResourceDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SapTableResourceDataset.
func (strd SapTableResourceDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -159852,6 +172515,16 @@ func (strd SapTableResourceDataset) AsRelationalTableDataset() (*RelationalTable
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for SapTableResourceDataset.
+func (strd SapTableResourceDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SapTableResourceDataset.
+func (strd SapTableResourceDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for SapTableResourceDataset.
func (strd SapTableResourceDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -159967,6 +172640,11 @@ func (strd SapTableResourceDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for SapTableResourceDataset.
+func (strd SapTableResourceDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for SapTableResourceDataset.
func (strd SapTableResourceDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -159977,6 +172655,11 @@ func (strd SapTableResourceDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for SapTableResourceDataset.
+func (strd SapTableResourceDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for SapTableResourceDataset.
func (strd SapTableResourceDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -160132,7 +172815,7 @@ type SapTableSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -160277,6 +172960,11 @@ func (sts SapTableSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SapTableSource.
+func (sts SapTableSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SapTableSource.
func (sts SapTableSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -160497,6 +173185,11 @@ func (sts SapTableSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SapTableSource.
+func (sts SapTableSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SapTableSource.
func (sts SapTableSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -160572,6 +173265,11 @@ func (sts SapTableSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SapTableSource.
+func (sts SapTableSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SapTableSource.
func (sts SapTableSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -160582,6 +173280,11 @@ func (sts SapTableSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SapTableSource.
+func (sts SapTableSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SapTableSource.
func (sts SapTableSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -160737,7 +173440,7 @@ type ScheduleTrigger struct {
AdditionalProperties map[string]interface{} `json:""`
// Description - Trigger description.
Description *string `json:"description,omitempty"`
- // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'Started', 'Stopped', 'Disabled'
+ // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'TriggerRuntimeStateStarted', 'TriggerRuntimeStateStopped', 'TriggerRuntimeStateDisabled'
RuntimeState TriggerRuntimeState `json:"runtimeState,omitempty"`
// Annotations - List of tags that can be used for describing the trigger.
Annotations *[]interface{} `json:"annotations,omitempty"`
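
The RuntimeState enum values are renamed in the hunk above from the bare Started/Stopped/Disabled to the prefixed TriggerRuntimeState* constants, so existing comparisons against the old names need updating. A minimal sketch using the new names, assuming the import path below and that the constants are exported from the package as listed in the comment:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// isRunning reports whether a trigger's read-only runtime state says it is
// started, using the renamed TriggerRuntimeState constants.
func isRunning(trig datafactory.ScheduleTrigger) bool {
	switch trig.RuntimeState {
	case datafactory.TriggerRuntimeStateStarted:
		return true
	case datafactory.TriggerRuntimeStateStopped, datafactory.TriggerRuntimeStateDisabled:
		return false
	}
	return false
}

func main() {
	fmt.Println(isRunning(datafactory.ScheduleTrigger{}))
}
```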
@@ -161577,7 +174280,7 @@ type ServiceNowLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -161729,6 +174432,11 @@ func (snls ServiceNowLinkedService) AsMarketoLinkedService() (*MarketoLinkedServ
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for ServiceNowLinkedService.
+func (snls ServiceNowLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for ServiceNowLinkedService.
func (snls ServiceNowLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -162297,7 +175005,7 @@ type ServiceNowObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -162433,6 +175141,11 @@ func (snod ServiceNowObjectDataset) AsMarketoObjectDataset() (*MarketoObjectData
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for ServiceNowObjectDataset.
+func (snod ServiceNowObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for ServiceNowObjectDataset.
func (snod ServiceNowObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -162558,6 +175271,16 @@ func (snod ServiceNowObjectDataset) AsSapCloudForCustomerResourceDataset() (*Sap
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for ServiceNowObjectDataset.
+func (snod ServiceNowObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for ServiceNowObjectDataset.
+func (snod ServiceNowObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for ServiceNowObjectDataset.
func (snod ServiceNowObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -162598,6 +175321,16 @@ func (snod ServiceNowObjectDataset) AsRelationalTableDataset() (*RelationalTable
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for ServiceNowObjectDataset.
+func (snod ServiceNowObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for ServiceNowObjectDataset.
+func (snod ServiceNowObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for ServiceNowObjectDataset.
func (snod ServiceNowObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -162713,6 +175446,11 @@ func (snod ServiceNowObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for ServiceNowObjectDataset.
+func (snod ServiceNowObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for ServiceNowObjectDataset.
func (snod ServiceNowObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -162723,6 +175461,11 @@ func (snod ServiceNowObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for ServiceNowObjectDataset.
+func (snod ServiceNowObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
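The same pattern covers the dataset union: the new AsJSONDataset and AsAvroDataset assertions sit alongside the existing format-specific ones. A hedged sketch of probing a decoded BasicDataset for its file format; the helper name and import path are assumptions, not part of the generated API:

```go
package example

import "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"

// datasetFormat reports which file format a decoded BasicDataset represents,
// using the newly generated JSON and Avro assertions alongside existing ones.
func datasetFormat(d datafactory.BasicDataset) string {
	if _, ok := d.AsJSONDataset(); ok {
		return "json"
	}
	if _, ok := d.AsAvroDataset(); ok {
		return "avro"
	}
	if _, ok := d.AsParquetDataset(); ok {
		return "parquet"
	}
	if _, ok := d.AsDelimitedTextDataset(); ok {
		return "delimited text"
	}
	return "other"
}
```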
// AsAmazonS3Dataset is the BasicDataset implementation for ServiceNowObjectDataset.
func (snod ServiceNowObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -162858,7 +175601,7 @@ type ServiceNowSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -162982,6 +175725,11 @@ func (sns ServiceNowSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for ServiceNowSource.
+func (sns ServiceNowSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for ServiceNowSource.
func (sns ServiceNowSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -163202,6 +175950,11 @@ func (sns ServiceNowSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for ServiceNowSource.
+func (sns ServiceNowSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for ServiceNowSource.
func (sns ServiceNowSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -163277,6 +176030,11 @@ func (sns ServiceNowSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for ServiceNowSource.
+func (sns ServiceNowSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for ServiceNowSource.
func (sns ServiceNowSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -163287,6 +176045,11 @@ func (sns ServiceNowSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for ServiceNowSource.
+func (sns ServiceNowSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for ServiceNowSource.
func (sns ServiceNowSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -163927,7 +176690,7 @@ type SftpServerLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -164079,6 +176842,11 @@ func (ssls SftpServerLinkedService) AsMarketoLinkedService() (*MarketoLinkedServ
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SftpServerLinkedService.
+func (ssls SftpServerLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SftpServerLinkedService.
func (ssls SftpServerLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -164651,7 +177419,7 @@ type ShopifyLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -164803,6 +177571,11 @@ func (sls ShopifyLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for ShopifyLinkedService.
+func (sls ShopifyLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for ShopifyLinkedService.
func (sls ShopifyLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -165328,7 +178101,7 @@ type ShopifyObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -165464,6 +178237,11 @@ func (sod ShopifyObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for ShopifyObjectDataset.
+func (sod ShopifyObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for ShopifyObjectDataset.
func (sod ShopifyObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -165589,6 +178367,16 @@ func (sod ShopifyObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapClou
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for ShopifyObjectDataset.
+func (sod ShopifyObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for ShopifyObjectDataset.
+func (sod ShopifyObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for ShopifyObjectDataset.
func (sod ShopifyObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -165629,6 +178417,16 @@ func (sod ShopifyObjectDataset) AsRelationalTableDataset() (*RelationalTableData
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for ShopifyObjectDataset.
+func (sod ShopifyObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for ShopifyObjectDataset.
+func (sod ShopifyObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for ShopifyObjectDataset.
func (sod ShopifyObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -165744,6 +178542,11 @@ func (sod ShopifyObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for ShopifyObjectDataset.
+func (sod ShopifyObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for ShopifyObjectDataset.
func (sod ShopifyObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -165754,6 +178557,11 @@ func (sod ShopifyObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for ShopifyObjectDataset.
+func (sod ShopifyObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for ShopifyObjectDataset.
func (sod ShopifyObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -165889,7 +178697,7 @@ type ShopifySource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -166013,6 +178821,11 @@ func (ss ShopifySource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for ShopifySource.
+func (ss ShopifySource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for ShopifySource.
func (ss ShopifySource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -166233,6 +179046,11 @@ func (ss ShopifySource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for ShopifySource.
+func (ss ShopifySource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for ShopifySource.
func (ss ShopifySource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -166308,6 +179126,11 @@ func (ss ShopifySource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for ShopifySource.
+func (ss ShopifySource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for ShopifySource.
func (ss ShopifySource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -166318,6 +179141,11 @@ func (ss ShopifySource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for ShopifySource.
+func (ss ShopifySource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for ShopifySource.
func (ss ShopifySource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -166400,6 +179228,16 @@ func (ss *ShopifySource) UnmarshalJSON(body []byte) error {
return nil
}
+// SparkDatasetTypeProperties spark dataset properties.
+type SparkDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Table - The table name of the Spark dataset. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Schema - The schema name of the Spark dataset. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+}
+
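Since tableName is being retired in favor of the split schema + table properties, new callers would populate Schema and Table on this struct. A minimal sketch with placeholder values (the import path is assumed; because the fields are interface{}, an Expression object could be supplied instead of a plain string):

```go
package example

import "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"

// sparkProps uses the split properties; TableName is left unset because it
// is being retired in favor of Schema + Table. The names are placeholders.
var sparkProps = datafactory.SparkDatasetTypeProperties{
	Schema: "reporting",    // schema name of the Spark dataset
	Table:  "daily_events", // table name of the Spark dataset
}
```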
// SparkLinkedService spark Server linked service.
type SparkLinkedService struct {
// SparkLinkedServiceTypeProperties - Spark Server linked service properties.
@@ -166414,7 +179252,7 @@ type SparkLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -166566,6 +179404,11 @@ func (sls SparkLinkedService) AsMarketoLinkedService() (*MarketoLinkedService, b
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SparkLinkedService.
+func (sls SparkLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SparkLinkedService.
func (sls SparkLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -167161,8 +180004,8 @@ func (slstp *SparkLinkedServiceTypeProperties) UnmarshalJSON(body []byte) error
// SparkObjectDataset spark Server dataset.
type SparkObjectDataset struct {
- // GenericDatasetTypeProperties - Properties specific to this dataset type.
- *GenericDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // SparkDatasetTypeProperties - Properties specific to this dataset type.
+ *SparkDatasetTypeProperties `json:"typeProperties,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// Description - Dataset description.
@@ -167179,7 +180022,7 @@ type SparkObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
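Because SparkObjectDataset now embeds *SparkDatasetTypeProperties instead of *GenericDatasetTypeProperties, callers that set typeProperties on this dataset must switch to the Spark-specific struct (a breaking change in v33.0.0). A hedged sketch of the new shape, with placeholder values and an assumed import path; the generated MarshalJSON method that follows nests these properties under "typeProperties":

```go
package example

import "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"

// buildSparkDataset returns the JSON body for a Spark dataset using the new
// embedded SparkDatasetTypeProperties. Field values are placeholders.
func buildSparkDataset() ([]byte, error) {
	sod := datafactory.SparkObjectDataset{
		SparkDatasetTypeProperties: &datafactory.SparkDatasetTypeProperties{
			Schema: "reporting",
			Table:  "daily_events",
		},
	}
	// MarshalJSON sets the TypeSparkObject discriminator and nests the
	// properties under "typeProperties".
	return sod.MarshalJSON()
}
```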
@@ -167187,8 +180030,8 @@ type SparkObjectDataset struct {
func (sod SparkObjectDataset) MarshalJSON() ([]byte, error) {
sod.Type = TypeSparkObject
objectMap := make(map[string]interface{})
- if sod.GenericDatasetTypeProperties != nil {
- objectMap["typeProperties"] = sod.GenericDatasetTypeProperties
+ if sod.SparkDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = sod.SparkDatasetTypeProperties
}
if sod.Description != nil {
objectMap["description"] = sod.Description
@@ -167315,6 +180158,11 @@ func (sod SparkObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, b
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SparkObjectDataset.
+func (sod SparkObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for SparkObjectDataset.
func (sod SparkObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -167440,6 +180288,16 @@ func (sod SparkObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudF
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for SparkObjectDataset.
+func (sod SparkObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SparkObjectDataset.
+func (sod SparkObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SparkObjectDataset.
func (sod SparkObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -167480,6 +180338,16 @@ func (sod SparkObjectDataset) AsRelationalTableDataset() (*RelationalTableDatase
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for SparkObjectDataset.
+func (sod SparkObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SparkObjectDataset.
+func (sod SparkObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for SparkObjectDataset.
func (sod SparkObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -167595,6 +180463,11 @@ func (sod SparkObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for SparkObjectDataset.
+func (sod SparkObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for SparkObjectDataset.
func (sod SparkObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -167605,6 +180478,11 @@ func (sod SparkObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for SparkObjectDataset.
+func (sod SparkObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for SparkObjectDataset.
func (sod SparkObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -167631,12 +180509,12 @@ func (sod *SparkObjectDataset) UnmarshalJSON(body []byte) error {
switch k {
case "typeProperties":
if v != nil {
- var genericDatasetTypeProperties GenericDatasetTypeProperties
- err = json.Unmarshal(*v, &genericDatasetTypeProperties)
+ var sparkDatasetTypeProperties SparkDatasetTypeProperties
+ err = json.Unmarshal(*v, &sparkDatasetTypeProperties)
if err != nil {
return err
}
- sod.GenericDatasetTypeProperties = &genericDatasetTypeProperties
+ sod.SparkDatasetTypeProperties = &sparkDatasetTypeProperties
}
default:
if v != nil {
@@ -167740,7 +180618,7 @@ type SparkSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -167864,6 +180742,11 @@ func (ss SparkSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SparkSource.
+func (ss SparkSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SparkSource.
func (ss SparkSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -168084,6 +180967,11 @@ func (ss SparkSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SparkSource.
+func (ss SparkSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SparkSource.
func (ss SparkSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -168159,6 +181047,11 @@ func (ss SparkSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SparkSource.
+func (ss SparkSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SparkSource.
func (ss SparkSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -168169,6 +181062,11 @@ func (ss SparkSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SparkSource.
+func (ss SparkSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SparkSource.
func (ss SparkSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -168259,6 +181157,8 @@ type SQLDWSink struct {
AllowPolyBase interface{} `json:"allowPolyBase,omitempty"`
// PolyBaseSettings - Specifies PolyBase-related settings when allowPolyBase is true.
PolyBaseSettings *PolybaseSettings `json:"polyBaseSettings,omitempty"`
+ // TableOption - The option to handle the sink table, such as autoCreate. For now only the 'autoCreate' value is supported. Type: string (or Expression with resultType string).
+ TableOption interface{} `json:"tableOption,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// WriteBatchSize - Write batch size. Type: integer (or Expression with resultType integer), minimum: 0.
@@ -168271,7 +181171,7 @@ type SQLDWSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
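SQLDWSink gains the TableOption field shown above. A minimal sketch of setting it, with placeholder values and an assumed import path; since the field is interface{}, a plain string or an Expression object can be supplied:

```go
package example

import "github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"

// dwSink enables PolyBase and asks the service to create the sink table if
// it does not already exist; per the field comment, only "autoCreate" is
// supported for TableOption today. Values are placeholders.
var dwSink = datafactory.SQLDWSink{
	AllowPolyBase: true,
	TableOption:   "autoCreate",
}
```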
@@ -168288,6 +181188,9 @@ func (sds SQLDWSink) MarshalJSON() ([]byte, error) {
if sds.PolyBaseSettings != nil {
objectMap["polyBaseSettings"] = sds.PolyBaseSettings
}
+ if sds.TableOption != nil {
+ objectMap["tableOption"] = sds.TableOption
+ }
if sds.WriteBatchSize != nil {
objectMap["writeBatchSize"] = sds.WriteBatchSize
}
@@ -168432,6 +181335,11 @@ func (sds SQLDWSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for SQLDWSink.
+func (sds SQLDWSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for SQLDWSink.
func (sds SQLDWSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -168447,11 +181355,21 @@ func (sds SQLDWSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bool
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for SQLDWSink.
+func (sds SQLDWSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for SQLDWSink.
func (sds SQLDWSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for SQLDWSink.
+func (sds SQLDWSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for SQLDWSink.
func (sds SQLDWSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -168503,6 +181421,15 @@ func (sds *SQLDWSink) UnmarshalJSON(body []byte) error {
}
sds.PolyBaseSettings = &polyBaseSettings
}
+ case "tableOption":
+ if v != nil {
+ var tableOption interface{}
+ err = json.Unmarshal(*v, &tableOption)
+ if err != nil {
+ return err
+ }
+ sds.TableOption = tableOption
+ }
default:
if v != nil {
var additionalProperties interface{}
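
The tableOption case above (repeated for SQLMISink, SQLServerSink and SQLSink further down) wires the new property through the custom JSON encoders and decoders. A minimal round-trip sketch follows; the import path is an assumption, while the field and constant names are taken from this diff.

```go
package main

import (
	"encoding/json"
	"fmt"

	// import path assumed from this SDK's layout
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// TableOption is typed interface{} so it can hold either a literal string
	// or an Expression object; a plain string is the simplest case.
	sink := datafactory.SQLDWSink{
		Type:        datafactory.TypeSQLDWSink,
		TableOption: "autoCreate",
	}

	payload, err := json.Marshal(sink) // uses the MarshalJSON shown above
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload)) // the encoded object now carries "tableOption":"autoCreate"

	var roundTripped datafactory.SQLDWSink
	if err := json.Unmarshal(payload, &roundTripped); err != nil { // uses the UnmarshalJSON shown above
		panic(err)
	}
	fmt.Println(roundTripped.TableOption) // autoCreate
}
```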
@@ -168591,7 +181518,7 @@ type SQLDWSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -168721,6 +181648,11 @@ func (sds SQLDWSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SQLDWSource.
+func (sds SQLDWSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SQLDWSource.
func (sds SQLDWSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -168941,6 +181873,11 @@ func (sds SQLDWSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SQLDWSource.
+func (sds SQLDWSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SQLDWSource.
func (sds SQLDWSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -169016,6 +181953,11 @@ func (sds SQLDWSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SQLDWSource.
+func (sds SQLDWSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SQLDWSource.
func (sds SQLDWSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -169026,6 +181968,11 @@ func (sds SQLDWSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SQLDWSource.
+func (sds SQLDWSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SQLDWSource.
func (sds SQLDWSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -169138,6 +182085,8 @@ type SQLMISink struct {
StoredProcedureParameters map[string]*StoredProcedureParameter `json:"storedProcedureParameters"`
// StoredProcedureTableTypeParameterName - The stored procedure parameter name of the table type. Type: string (or Expression with resultType string).
StoredProcedureTableTypeParameterName interface{} `json:"storedProcedureTableTypeParameterName,omitempty"`
+ // TableOption - The option to handle the sink table, such as autoCreate. Currently only the 'autoCreate' value is supported. Type: string (or Expression with resultType string).
+ TableOption interface{} `json:"tableOption,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// WriteBatchSize - Write batch size. Type: integer (or Expression with resultType integer), minimum: 0.
@@ -169150,7 +182099,7 @@ type SQLMISink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -169173,6 +182122,9 @@ func (sms SQLMISink) MarshalJSON() ([]byte, error) {
if sms.StoredProcedureTableTypeParameterName != nil {
objectMap["storedProcedureTableTypeParameterName"] = sms.StoredProcedureTableTypeParameterName
}
+ if sms.TableOption != nil {
+ objectMap["tableOption"] = sms.TableOption
+ }
if sms.WriteBatchSize != nil {
objectMap["writeBatchSize"] = sms.WriteBatchSize
}
@@ -169317,6 +182269,11 @@ func (sms SQLMISink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for SQLMISink.
+func (sms SQLMISink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for SQLMISink.
func (sms SQLMISink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -169332,11 +182289,21 @@ func (sms SQLMISink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bool
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for SQLMISink.
+func (sms SQLMISink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for SQLMISink.
func (sms SQLMISink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for SQLMISink.
+func (sms SQLMISink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for SQLMISink.
func (sms SQLMISink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -169406,6 +182373,15 @@ func (sms *SQLMISink) UnmarshalJSON(body []byte) error {
}
sms.StoredProcedureTableTypeParameterName = storedProcedureTableTypeParameterName
}
+ case "tableOption":
+ if v != nil {
+ var tableOption interface{}
+ err = json.Unmarshal(*v, &tableOption)
+ if err != nil {
+ return err
+ }
+ sms.TableOption = tableOption
+ }
default:
if v != nil {
var additionalProperties interface{}
@@ -169496,7 +182472,7 @@ type SQLMISource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -169629,6 +182605,11 @@ func (sms SQLMISource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SQLMISource.
+func (sms SQLMISource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SQLMISource.
func (sms SQLMISource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -169849,6 +182830,11 @@ func (sms SQLMISource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SQLMISource.
+func (sms SQLMISource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SQLMISource.
func (sms SQLMISource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -169924,6 +182910,11 @@ func (sms SQLMISource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SQLMISource.
+func (sms SQLMISource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SQLMISource.
func (sms SQLMISource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -169934,6 +182925,11 @@ func (sms SQLMISource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SQLMISource.
+func (sms SQLMISource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SQLMISource.
func (sms SQLMISource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -170057,7 +183053,7 @@ type SQLServerLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -170209,6 +183205,11 @@ func (ssls SQLServerLinkedService) AsMarketoLinkedService() (*MarketoLinkedServi
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SQLServerLinkedService.
+func (ssls SQLServerLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SQLServerLinkedService.
func (ssls SQLServerLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -170704,6 +183705,8 @@ type SQLServerSink struct {
StoredProcedureParameters map[string]*StoredProcedureParameter `json:"storedProcedureParameters"`
// StoredProcedureTableTypeParameterName - The stored procedure parameter name of the table type. Type: string (or Expression with resultType string).
StoredProcedureTableTypeParameterName interface{} `json:"storedProcedureTableTypeParameterName,omitempty"`
+ // TableOption - The option to handle the sink table, such as autoCreate. Currently only the 'autoCreate' value is supported. Type: string (or Expression with resultType string).
+ TableOption interface{} `json:"tableOption,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// WriteBatchSize - Write batch size. Type: integer (or Expression with resultType integer), minimum: 0.
@@ -170716,7 +183719,7 @@ type SQLServerSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -170739,6 +183742,9 @@ func (sss SQLServerSink) MarshalJSON() ([]byte, error) {
if sss.StoredProcedureTableTypeParameterName != nil {
objectMap["storedProcedureTableTypeParameterName"] = sss.StoredProcedureTableTypeParameterName
}
+ if sss.TableOption != nil {
+ objectMap["tableOption"] = sss.TableOption
+ }
if sss.WriteBatchSize != nil {
objectMap["writeBatchSize"] = sss.WriteBatchSize
}
@@ -170883,6 +183889,11 @@ func (sss SQLServerSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for SQLServerSink.
+func (sss SQLServerSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for SQLServerSink.
func (sss SQLServerSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -170898,11 +183909,21 @@ func (sss SQLServerSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink,
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for SQLServerSink.
+func (sss SQLServerSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for SQLServerSink.
func (sss SQLServerSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for SQLServerSink.
+func (sss SQLServerSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for SQLServerSink.
func (sss SQLServerSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -170972,6 +183993,15 @@ func (sss *SQLServerSink) UnmarshalJSON(body []byte) error {
}
sss.StoredProcedureTableTypeParameterName = storedProcedureTableTypeParameterName
}
+ case "tableOption":
+ if v != nil {
+ var tableOption interface{}
+ err = json.Unmarshal(*v, &tableOption)
+ if err != nil {
+ return err
+ }
+ sss.TableOption = tableOption
+ }
default:
if v != nil {
var additionalProperties interface{}
@@ -171062,7 +184092,7 @@ type SQLServerSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -171195,6 +184225,11 @@ func (sss SQLServerSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SQLServerSource.
+func (sss SQLServerSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SQLServerSource.
func (sss SQLServerSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -171415,6 +184450,11 @@ func (sss SQLServerSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SQLServerSource.
+func (sss SQLServerSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SQLServerSource.
func (sss SQLServerSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -171490,6 +184530,11 @@ func (sss SQLServerSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SQLServerSource.
+func (sss SQLServerSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SQLServerSource.
func (sss SQLServerSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -171500,6 +184545,11 @@ func (sss SQLServerSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SQLServerSource.
+func (sss SQLServerSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SQLServerSource.
func (sss SQLServerSource) AsCopySource() (*CopySource, bool) {
return nil, false
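
The BasicCopySource union receives the same treatment: every source type now answers AsAzureMariaDBSource, AsSapBwSource, AsJSONSource and AsAvroSource, each returning (nil, false) unless the receiver is that type. Below is a hedged sketch that classifies a source by the newly added kinds; the package declaration, import path and newSourceKind helper are illustrative, not part of this diff.

```go
package sample

import (
	// import path assumed from this SDK's layout
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// newSourceKind labels a copy source by the kinds this diff adds to the
// BasicCopySource union; everything else falls through to "other".
func newSourceKind(s datafactory.BasicCopySource) string {
	if _, ok := s.AsAvroSource(); ok {
		return "avro"
	}
	if _, ok := s.AsJSONSource(); ok {
		return "json"
	}
	if _, ok := s.AsSapBwSource(); ok {
		return "sap-bw"
	}
	if _, ok := s.AsAzureMariaDBSource(); ok {
		return "azure-mariadb"
	}
	return "other"
}
```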
@@ -171989,7 +185039,7 @@ type SQLServerTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -172125,6 +185175,11 @@ func (sstd SQLServerTableDataset) AsMarketoObjectDataset() (*MarketoObjectDatase
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SQLServerTableDataset.
+func (sstd SQLServerTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for SQLServerTableDataset.
func (sstd SQLServerTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -172250,6 +185305,16 @@ func (sstd SQLServerTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCl
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for SQLServerTableDataset.
+func (sstd SQLServerTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SQLServerTableDataset.
+func (sstd SQLServerTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SQLServerTableDataset.
func (sstd SQLServerTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -172290,6 +185355,16 @@ func (sstd SQLServerTableDataset) AsRelationalTableDataset() (*RelationalTableDa
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for SQLServerTableDataset.
+func (sstd SQLServerTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SQLServerTableDataset.
+func (sstd SQLServerTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for SQLServerTableDataset.
func (sstd SQLServerTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -172405,6 +185480,11 @@ func (sstd SQLServerTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for SQLServerTableDataset.
+func (sstd SQLServerTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for SQLServerTableDataset.
func (sstd SQLServerTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -172415,6 +185495,11 @@ func (sstd SQLServerTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for SQLServerTableDataset.
+func (sstd SQLServerTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for SQLServerTableDataset.
func (sstd SQLServerTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
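
Datasets gain matching As* helpers for the new members of the BasicDataset union (SapBwCube, SybaseTable, Db2Table, AmazonRedshiftTable, JSON, Avro). A short sketch that checks whether a dataset is one of the new file-format datasets; the package name, import path and helper name are assumptions.

```go
package sample

import (
	// import path assumed from this SDK's layout
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// usesNewFileFormat reports whether a dataset is one of the file-format
// datasets added in this API version (Avro or JSON).
func usesNewFileFormat(d datafactory.BasicDataset) bool {
	if _, ok := d.AsAvroDataset(); ok {
		return true
	}
	_, ok := d.AsJSONDataset()
	return ok
}
```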
@@ -172540,8 +185625,12 @@ func (sstd *SQLServerTableDataset) UnmarshalJSON(body []byte) error {
// SQLServerTableDatasetTypeProperties on-premises SQL Server dataset properties.
type SQLServerTableDatasetTypeProperties struct {
- // TableName - The table name of the SQL Server dataset. Type: string (or Expression with resultType string).
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
TableName interface{} `json:"tableName,omitempty"`
+ // Schema - The schema name of the SQL Server dataset. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+ // Table - The table name of the SQL Server dataset. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
}
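
SQLServerTableDatasetTypeProperties now splits the table reference into separate Schema and Table properties, keeping TableName only for backward compatibility. A minimal sketch of populating the new pair; the import path is assumed and the values are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"

	// import path assumed from this SDK's layout
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// Prefer the new schema/table pair over the retired tableName.
	props := datafactory.SQLServerTableDatasetTypeProperties{
		Schema: "dbo",
		Table:  "SalesOrderHeader",
	}
	b, _ := json.Marshal(props)
	fmt.Println(string(b)) // {"schema":"dbo","table":"SalesOrderHeader"}
}
```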
// SQLSink a copy activity SQL sink.
@@ -172556,6 +185645,8 @@ type SQLSink struct {
StoredProcedureParameters map[string]*StoredProcedureParameter `json:"storedProcedureParameters"`
// StoredProcedureTableTypeParameterName - The stored procedure parameter name of the table type. Type: string (or Expression with resultType string).
StoredProcedureTableTypeParameterName interface{} `json:"storedProcedureTableTypeParameterName,omitempty"`
+ // TableOption - The option to handle the sink table, such as autoCreate. Currently only the 'autoCreate' value is supported. Type: string (or Expression with resultType string).
+ TableOption interface{} `json:"tableOption,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// WriteBatchSize - Write batch size. Type: integer (or Expression with resultType integer), minimum: 0.
@@ -172568,7 +185659,7 @@ type SQLSink struct {
SinkRetryWait interface{} `json:"sinkRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the sink data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzurePostgreSQLSink', 'TypeDelimitedTextSink'
+ // Type - Possible values include: 'TypeCopySink', 'TypeCosmosDbMongoDbAPISink', 'TypeSalesforceServiceCloudSink', 'TypeSalesforceSink', 'TypeAzureDataExplorerSink', 'TypeCommonDataServiceForAppsSink', 'TypeDynamicsCrmSink', 'TypeDynamicsSink', 'TypeMicrosoftAccessSink', 'TypeInformixSink', 'TypeOdbcSink', 'TypeAzureSearchIndexSink', 'TypeAzureBlobFSSink', 'TypeAzureDataLakeStoreSink', 'TypeOracleSink', 'TypeSQLDWSink', 'TypeSQLMISink', 'TypeAzureSQLSink', 'TypeSQLServerSink', 'TypeSQLSink', 'TypeDocumentDbCollectionSink', 'TypeFileSystemSink', 'TypeBlobSink', 'TypeBinarySink', 'TypeParquetSink', 'TypeAvroSink', 'TypeAzureTableSink', 'TypeAzureQueueSink', 'TypeSapCloudForCustomerSink', 'TypeAzureMySQLSink', 'TypeAzurePostgreSQLSink', 'TypeJSONSink', 'TypeDelimitedTextSink'
Type TypeBasicCopySink `json:"type,omitempty"`
}
@@ -172591,6 +185682,9 @@ func (ss SQLSink) MarshalJSON() ([]byte, error) {
if ss.StoredProcedureTableTypeParameterName != nil {
objectMap["storedProcedureTableTypeParameterName"] = ss.StoredProcedureTableTypeParameterName
}
+ if ss.TableOption != nil {
+ objectMap["tableOption"] = ss.TableOption
+ }
if ss.WriteBatchSize != nil {
objectMap["writeBatchSize"] = ss.WriteBatchSize
}
@@ -172735,6 +185829,11 @@ func (ss SQLSink) AsParquetSink() (*ParquetSink, bool) {
return nil, false
}
+// AsAvroSink is the BasicCopySink implementation for SQLSink.
+func (ss SQLSink) AsAvroSink() (*AvroSink, bool) {
+ return nil, false
+}
+
// AsAzureTableSink is the BasicCopySink implementation for SQLSink.
func (ss SQLSink) AsAzureTableSink() (*AzureTableSink, bool) {
return nil, false
@@ -172750,11 +185849,21 @@ func (ss SQLSink) AsSapCloudForCustomerSink() (*SapCloudForCustomerSink, bool) {
return nil, false
}
+// AsAzureMySQLSink is the BasicCopySink implementation for SQLSink.
+func (ss SQLSink) AsAzureMySQLSink() (*AzureMySQLSink, bool) {
+ return nil, false
+}
+
// AsAzurePostgreSQLSink is the BasicCopySink implementation for SQLSink.
func (ss SQLSink) AsAzurePostgreSQLSink() (*AzurePostgreSQLSink, bool) {
return nil, false
}
+// AsJSONSink is the BasicCopySink implementation for SQLSink.
+func (ss SQLSink) AsJSONSink() (*JSONSink, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSink is the BasicCopySink implementation for SQLSink.
func (ss SQLSink) AsDelimitedTextSink() (*DelimitedTextSink, bool) {
return nil, false
@@ -172824,6 +185933,15 @@ func (ss *SQLSink) UnmarshalJSON(body []byte) error {
}
ss.StoredProcedureTableTypeParameterName = storedProcedureTableTypeParameterName
}
+ case "tableOption":
+ if v != nil {
+ var tableOption interface{}
+ err = json.Unmarshal(*v, &tableOption)
+ if err != nil {
+ return err
+ }
+ ss.TableOption = tableOption
+ }
default:
if v != nil {
var additionalProperties interface{}
@@ -172912,7 +186030,7 @@ type SQLSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -173042,6 +186160,11 @@ func (ss SQLSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SQLSource.
+func (ss SQLSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SQLSource.
func (ss SQLSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -173262,6 +186385,11 @@ func (ss SQLSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SQLSource.
+func (ss SQLSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SQLSource.
func (ss SQLSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -173337,6 +186465,11 @@ func (ss SQLSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SQLSource.
+func (ss SQLSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SQLSource.
func (ss SQLSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -173347,6 +186480,11 @@ func (ss SQLSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SQLSource.
+func (ss SQLSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SQLSource.
func (ss SQLSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -173461,7 +186599,7 @@ type SquareLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -173613,6 +186751,11 @@ func (sls SquareLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SquareLinkedService.
+func (sls SquareLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SquareLinkedService.
func (sls SquareLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -174160,7 +187303,7 @@ type SquareObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -174296,6 +187439,11 @@ func (sod SquareObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SquareObjectDataset.
+func (sod SquareObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for SquareObjectDataset.
func (sod SquareObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -174421,6 +187569,16 @@ func (sod SquareObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloud
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for SquareObjectDataset.
+func (sod SquareObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SquareObjectDataset.
+func (sod SquareObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SquareObjectDataset.
func (sod SquareObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -174461,6 +187619,16 @@ func (sod SquareObjectDataset) AsRelationalTableDataset() (*RelationalTableDatas
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for SquareObjectDataset.
+func (sod SquareObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SquareObjectDataset.
+func (sod SquareObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for SquareObjectDataset.
func (sod SquareObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -174576,6 +187744,11 @@ func (sod SquareObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for SquareObjectDataset.
+func (sod SquareObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for SquareObjectDataset.
func (sod SquareObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -174586,6 +187759,11 @@ func (sod SquareObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for SquareObjectDataset.
+func (sod SquareObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for SquareObjectDataset.
func (sod SquareObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -174721,7 +187899,7 @@ type SquareSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -174845,6 +188023,11 @@ func (ss SquareSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SquareSource.
+func (ss SquareSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SquareSource.
func (ss SquareSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -175065,6 +188248,11 @@ func (ss SquareSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SquareSource.
+func (ss SquareSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SquareSource.
func (ss SquareSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -175140,6 +188328,11 @@ func (ss SquareSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SquareSource.
+func (ss SquareSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SquareSource.
func (ss SquareSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -175150,6 +188343,11 @@ func (ss SquareSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SquareSource.
+func (ss SquareSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SquareSource.
func (ss SquareSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -176105,36 +189303,125 @@ func (srs *StoreReadSettings) UnmarshalJSON(body []byte) error {
return nil
}
+// BasicStoreWriteSettings connector write settings.
+type BasicStoreWriteSettings interface {
+ AsFileServerWriteSettings() (*FileServerWriteSettings, bool)
+ AsAzureDataLakeStoreWriteSettings() (*AzureDataLakeStoreWriteSettings, bool)
+ AsAzureBlobFSWriteSettings() (*AzureBlobFSWriteSettings, bool)
+ AsAzureBlobStorageWriteSettings() (*AzureBlobStorageWriteSettings, bool)
+ AsStoreWriteSettings() (*StoreWriteSettings, bool)
+}
+
// StoreWriteSettings connector write settings.
type StoreWriteSettings struct {
	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
- // Type - The write setting type.
- Type *string `json:"type,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
// CopyBehavior - The type of copy behavior for copy sink.
CopyBehavior interface{} `json:"copyBehavior,omitempty"`
+ // Type - Possible values include: 'TypeStoreWriteSettings', 'TypeFileServerWriteSettings', 'TypeAzureDataLakeStoreWriteSettings', 'TypeAzureBlobFSWriteSettings', 'TypeAzureBlobStorageWriteSettings'
+ Type TypeBasicStoreWriteSettings `json:"type,omitempty"`
+}
+
+func unmarshalBasicStoreWriteSettings(body []byte) (BasicStoreWriteSettings, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["type"] {
+ case string(TypeFileServerWriteSettings):
+ var fsws FileServerWriteSettings
+ err := json.Unmarshal(body, &fsws)
+ return fsws, err
+ case string(TypeAzureDataLakeStoreWriteSettings):
+ var adlsws AzureDataLakeStoreWriteSettings
+ err := json.Unmarshal(body, &adlsws)
+ return adlsws, err
+ case string(TypeAzureBlobFSWriteSettings):
+ var abfws AzureBlobFSWriteSettings
+ err := json.Unmarshal(body, &abfws)
+ return abfws, err
+ case string(TypeAzureBlobStorageWriteSettings):
+ var absws AzureBlobStorageWriteSettings
+ err := json.Unmarshal(body, &absws)
+ return absws, err
+ default:
+ var sws StoreWriteSettings
+ err := json.Unmarshal(body, &sws)
+ return sws, err
+ }
+}
+func unmarshalBasicStoreWriteSettingsArray(body []byte) ([]BasicStoreWriteSettings, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ swsArray := make([]BasicStoreWriteSettings, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ sws, err := unmarshalBasicStoreWriteSettings(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ swsArray[index] = sws
+ }
+ return swsArray, nil
}
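
The generated helpers above dispatch on the JSON `type` discriminator: `StoreWriteSettings.MarshalJSON` stamps the discriminator before encoding, and the matching `UnmarshalJSON` reads it back while collecting unrecognized keys into `AdditionalProperties`. A minimal round-trip sketch of that behavior, assuming the `services/datafactory/mgmt/2018-06-01/datafactory` import path and purely illustrative field values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	// Base connector write settings; the values here are illustrative only.
	sws := datafactory.StoreWriteSettings{
		MaxConcurrentConnections: 4,
		CopyBehavior:             "PreserveHierarchy",
	}

	// MarshalJSON stamps the "type" discriminator before encoding, so the
	// service can tell the write-settings variants apart.
	payload, err := json.Marshal(sws)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))

	// Round trip: the custom UnmarshalJSON reads the discriminator back into
	// the TypeBasicStoreWriteSettings field and keeps unknown keys in
	// AdditionalProperties.
	var decoded datafactory.StoreWriteSettings
	if err := json.Unmarshal(payload, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Type, decoded.CopyBehavior)
}
```
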
// MarshalJSON is the custom marshaler for StoreWriteSettings.
func (sws StoreWriteSettings) MarshalJSON() ([]byte, error) {
+ sws.Type = TypeStoreWriteSettings
objectMap := make(map[string]interface{})
- if sws.Type != nil {
- objectMap["type"] = sws.Type
- }
if sws.MaxConcurrentConnections != nil {
objectMap["maxConcurrentConnections"] = sws.MaxConcurrentConnections
}
if sws.CopyBehavior != nil {
objectMap["copyBehavior"] = sws.CopyBehavior
}
+ if sws.Type != "" {
+ objectMap["type"] = sws.Type
+ }
for k, v := range sws.AdditionalProperties {
objectMap[k] = v
}
return json.Marshal(objectMap)
}
+// AsFileServerWriteSettings is the BasicStoreWriteSettings implementation for StoreWriteSettings.
+func (sws StoreWriteSettings) AsFileServerWriteSettings() (*FileServerWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreWriteSettings is the BasicStoreWriteSettings implementation for StoreWriteSettings.
+func (sws StoreWriteSettings) AsAzureDataLakeStoreWriteSettings() (*AzureDataLakeStoreWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSWriteSettings is the BasicStoreWriteSettings implementation for StoreWriteSettings.
+func (sws StoreWriteSettings) AsAzureBlobFSWriteSettings() (*AzureBlobFSWriteSettings, bool) {
+ return nil, false
+}
+
+// AsAzureBlobStorageWriteSettings is the BasicStoreWriteSettings implementation for StoreWriteSettings.
+func (sws StoreWriteSettings) AsAzureBlobStorageWriteSettings() (*AzureBlobStorageWriteSettings, bool) {
+ return nil, false
+}
+
+// AsStoreWriteSettings is the BasicStoreWriteSettings implementation for StoreWriteSettings.
+func (sws StoreWriteSettings) AsStoreWriteSettings() (*StoreWriteSettings, bool) {
+ return &sws, true
+}
+
+// AsBasicStoreWriteSettings is the BasicStoreWriteSettings implementation for StoreWriteSettings.
+func (sws StoreWriteSettings) AsBasicStoreWriteSettings() (BasicStoreWriteSettings, bool) {
+ return &sws, true
+}
+
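
The `As*` methods generated above are how a caller narrows a `BasicStoreWriteSettings` interface value back to a concrete variant: each helper returns `(nil, false)` unless the receiver really is that variant. A sketch of that pattern follows; the `describeWriteSettings` helper is hypothetical, and the import path is assumed as in the previous sketch.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// describeWriteSettings narrows a BasicStoreWriteSettings interface value to a
// concrete variant; each As* helper reports ok == true only for its own type.
func describeWriteSettings(ws datafactory.BasicStoreWriteSettings) string {
	if _, ok := ws.AsAzureBlobStorageWriteSettings(); ok {
		return "Azure Blob Storage write settings"
	}
	if _, ok := ws.AsAzureDataLakeStoreWriteSettings(); ok {
		return "Azure Data Lake Store write settings"
	}
	if _, ok := ws.AsAzureBlobFSWriteSettings(); ok {
		return "Azure Blob FS write settings"
	}
	if _, ok := ws.AsFileServerWriteSettings(); ok {
		return "file server write settings"
	}
	return "base StoreWriteSettings (or an unrecognized variant)"
}

func main() {
	// The base type implements the interface itself, so it falls through to
	// the final case.
	fmt.Println(describeWriteSettings(datafactory.StoreWriteSettings{}))
}
```
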
// UnmarshalJSON is the custom unmarshaler for StoreWriteSettings struct.
func (sws *StoreWriteSettings) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
@@ -176156,15 +189443,6 @@ func (sws *StoreWriteSettings) UnmarshalJSON(body []byte) error {
}
sws.AdditionalProperties[k] = additionalProperties
}
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- sws.Type = &typeVar
- }
case "maxConcurrentConnections":
if v != nil {
var maxConcurrentConnections interface{}
@@ -176183,6 +189461,15 @@ func (sws *StoreWriteSettings) UnmarshalJSON(body []byte) error {
}
sws.CopyBehavior = copyBehavior
}
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicStoreWriteSettings
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sws.Type = typeVar
+ }
}
}
@@ -176215,7 +189502,7 @@ type SybaseLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -176367,6 +189654,11 @@ func (sls SybaseLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for SybaseLinkedService.
+func (sls SybaseLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for SybaseLinkedService.
func (sls SybaseLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -176895,7 +190187,7 @@ type SybaseSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -177019,6 +190311,11 @@ func (ss SybaseSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for SybaseSource.
+func (ss SybaseSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for SybaseSource.
func (ss SybaseSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -177239,6 +190536,11 @@ func (ss SybaseSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for SybaseSource.
+func (ss SybaseSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for SybaseSource.
func (ss SybaseSource) AsSybaseSource() (*SybaseSource, bool) {
return &ss, true
@@ -177314,6 +190616,11 @@ func (ss SybaseSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for SybaseSource.
+func (ss SybaseSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for SybaseSource.
func (ss SybaseSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -177324,6 +190631,11 @@ func (ss SybaseSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for SybaseSource.
+func (ss SybaseSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for SybaseSource.
func (ss SybaseSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -177406,6 +190718,616 @@ func (ss *SybaseSource) UnmarshalJSON(body []byte) error {
return nil
}
+// SybaseTableDataset the Sybase table dataset.
+type SybaseTableDataset struct {
+ // SybaseTableDatasetTypeProperties - Sybase table dataset properties.
+ *SybaseTableDatasetTypeProperties `json:"typeProperties,omitempty"`
+	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Description - Dataset description.
+ Description *string `json:"description,omitempty"`
+ // Structure - Columns that define the structure of the dataset. Type: array (or Expression with resultType array), itemType: DatasetDataElement.
+ Structure interface{} `json:"structure,omitempty"`
+ // Schema - Columns that define the physical type schema of the dataset. Type: array (or Expression with resultType array), itemType: DatasetSchemaDataElement.
+ Schema interface{} `json:"schema,omitempty"`
+ // LinkedServiceName - Linked service reference.
+ LinkedServiceName *LinkedServiceReference `json:"linkedServiceName,omitempty"`
+ // Parameters - Parameters for dataset.
+ Parameters map[string]*ParameterSpecification `json:"parameters"`
+ // Annotations - List of tags that can be used for describing the Dataset.
+ Annotations *[]interface{} `json:"annotations,omitempty"`
+ // Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
+ Folder *DatasetFolder `json:"folder,omitempty"`
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
+ Type TypeBasicDataset `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for SybaseTableDataset.
+func (std SybaseTableDataset) MarshalJSON() ([]byte, error) {
+ std.Type = TypeSybaseTable
+ objectMap := make(map[string]interface{})
+ if std.SybaseTableDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = std.SybaseTableDatasetTypeProperties
+ }
+ if std.Description != nil {
+ objectMap["description"] = std.Description
+ }
+ if std.Structure != nil {
+ objectMap["structure"] = std.Structure
+ }
+ if std.Schema != nil {
+ objectMap["schema"] = std.Schema
+ }
+ if std.LinkedServiceName != nil {
+ objectMap["linkedServiceName"] = std.LinkedServiceName
+ }
+ if std.Parameters != nil {
+ objectMap["parameters"] = std.Parameters
+ }
+ if std.Annotations != nil {
+ objectMap["annotations"] = std.Annotations
+ }
+ if std.Folder != nil {
+ objectMap["folder"] = std.Folder
+ }
+ if std.Type != "" {
+ objectMap["type"] = std.Type
+ }
+ for k, v := range std.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsGoogleAdWordsObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsGoogleAdWordsObjectDataset() (*GoogleAdWordsObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataExplorerTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzureDataExplorerTableDataset() (*AzureDataExplorerTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleServiceCloudObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsOracleServiceCloudObjectDataset() (*OracleServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsAXResourceDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsDynamicsAXResourceDataset() (*DynamicsAXResourceDataset, bool) {
+ return nil, false
+}
+
+// AsResponsysObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsResponsysObjectDataset() (*ResponsysObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceMarketingCloudObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSalesforceMarketingCloudObjectDataset() (*SalesforceMarketingCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsVerticaTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsVerticaTableDataset() (*VerticaTableDataset, bool) {
+ return nil, false
+}
+
+// AsNetezzaTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsNetezzaTableDataset() (*NetezzaTableDataset, bool) {
+ return nil, false
+}
+
+// AsZohoObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsZohoObjectDataset() (*ZohoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsXeroObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsXeroObjectDataset() (*XeroObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSquareObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSquareObjectDataset() (*SquareObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSparkObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSparkObjectDataset() (*SparkObjectDataset, bool) {
+ return nil, false
+}
+
+// AsShopifyObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsShopifyObjectDataset() (*ShopifyObjectDataset, bool) {
+ return nil, false
+}
+
+// AsServiceNowObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsServiceNowObjectDataset() (*ServiceNowObjectDataset, bool) {
+ return nil, false
+}
+
+// AsQuickBooksObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsQuickBooksObjectDataset() (*QuickBooksObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPrestoObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsPrestoObjectDataset() (*PrestoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPhoenixObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsPhoenixObjectDataset() (*PhoenixObjectDataset, bool) {
+ return nil, false
+}
+
+// AsPaypalObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsPaypalObjectDataset() (*PaypalObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMarketoObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMariaDBTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
+ return nil, false
+}
+
+// AsMagentoObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsMagentoObjectDataset() (*MagentoObjectDataset, bool) {
+ return nil, false
+}
+
+// AsJiraObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsJiraObjectDataset() (*JiraObjectDataset, bool) {
+ return nil, false
+}
+
+// AsImpalaObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsImpalaObjectDataset() (*ImpalaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHubspotObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsHubspotObjectDataset() (*HubspotObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHiveObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsHiveObjectDataset() (*HiveObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHBaseObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsHBaseObjectDataset() (*HBaseObjectDataset, bool) {
+ return nil, false
+}
+
+// AsGreenplumTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsGreenplumTableDataset() (*GreenplumTableDataset, bool) {
+ return nil, false
+}
+
+// AsGoogleBigQueryObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsGoogleBigQueryObjectDataset() (*GoogleBigQueryObjectDataset, bool) {
+ return nil, false
+}
+
+// AsEloquaObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsEloquaObjectDataset() (*EloquaObjectDataset, bool) {
+ return nil, false
+}
+
+// AsDrillTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsDrillTableDataset() (*DrillTableDataset, bool) {
+ return nil, false
+}
+
+// AsCouchbaseTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsCouchbaseTableDataset() (*CouchbaseTableDataset, bool) {
+ return nil, false
+}
+
+// AsConcurObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsConcurObjectDataset() (*ConcurObjectDataset, bool) {
+ return nil, false
+}
+
+// AsAzurePostgreSQLTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzurePostgreSQLTableDataset() (*AzurePostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonMWSObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAmazonMWSObjectDataset() (*AmazonMWSObjectDataset, bool) {
+ return nil, false
+}
+
+// AsHTTPDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsHTTPDataset() (*HTTPDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSearchIndexDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzureSearchIndexDataset() (*AzureSearchIndexDataset, bool) {
+ return nil, false
+}
+
+// AsWebTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsWebTableDataset() (*WebTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapTableResourceDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSapTableResourceDataset() (*SapTableResourceDataset, bool) {
+ return nil, false
+}
+
+// AsRestResourceDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsRestResourceDataset() (*RestResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSQLServerTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSQLServerTableDataset() (*SQLServerTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapOpenHubTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSapOpenHubTableDataset() (*SapOpenHubTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapHanaTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSapHanaTableDataset() (*SapHanaTableDataset, bool) {
+ return nil, false
+}
+
+// AsSapEccResourceDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSapEccResourceDataset() (*SapEccResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapCloudForCustomerResourceDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForCustomerResourceDataset, bool) {
+ return nil, false
+}
+
+// AsSapBwCubeDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return &std, true
+}
+
+// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
+ return nil, false
+}
+
+// AsSalesforceObjectDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsSalesforceObjectDataset() (*SalesforceObjectDataset, bool) {
+ return nil, false
+}
+
+// AsMicrosoftAccessTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsMicrosoftAccessTableDataset() (*MicrosoftAccessTableDataset, bool) {
+ return nil, false
+}
+
+// AsPostgreSQLTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsPostgreSQLTableDataset() (*PostgreSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsMySQLTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsMySQLTableDataset() (*MySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsOdbcTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsOdbcTableDataset() (*OdbcTableDataset, bool) {
+ return nil, false
+}
+
+// AsInformixTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsInformixTableDataset() (*InformixTableDataset, bool) {
+ return nil, false
+}
+
+// AsRelationalTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsRelationalTableDataset() (*RelationalTableDataset, bool) {
+ return nil, false
+}
+
+// AsDb2TableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureMySQLTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsTeradataTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsTeradataTableDataset() (*TeradataTableDataset, bool) {
+ return nil, false
+}
+
+// AsOracleTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsOracleTableDataset() (*OracleTableDataset, bool) {
+ return nil, false
+}
+
+// AsODataResourceDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsODataResourceDataset() (*ODataResourceDataset, bool) {
+ return nil, false
+}
+
+// AsCosmosDbMongoDbAPICollectionDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsCosmosDbMongoDbAPICollectionDataset() (*CosmosDbMongoDbAPICollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbV2CollectionDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsMongoDbV2CollectionDataset() (*MongoDbV2CollectionDataset, bool) {
+ return nil, false
+}
+
+// AsMongoDbCollectionDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsMongoDbCollectionDataset() (*MongoDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsFileShareDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsFileShareDataset() (*FileShareDataset, bool) {
+ return nil, false
+}
+
+// AsOffice365Dataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsOffice365Dataset() (*Office365Dataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobFSDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzureBlobFSDataset() (*AzureBlobFSDataset, bool) {
+ return nil, false
+}
+
+// AsAzureDataLakeStoreDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzureDataLakeStoreDataset() (*AzureDataLakeStoreDataset, bool) {
+ return nil, false
+}
+
+// AsCommonDataServiceForAppsEntityDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsCommonDataServiceForAppsEntityDataset() (*CommonDataServiceForAppsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsCrmEntityDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsDynamicsCrmEntityDataset() (*DynamicsCrmEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDynamicsEntityDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsDynamicsEntityDataset() (*DynamicsEntityDataset, bool) {
+ return nil, false
+}
+
+// AsDocumentDbCollectionDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsDocumentDbCollectionDataset() (*DocumentDbCollectionDataset, bool) {
+ return nil, false
+}
+
+// AsCustomDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsCustomDataset() (*CustomDataset, bool) {
+ return nil, false
+}
+
+// AsCassandraTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsCassandraTableDataset() (*CassandraTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLDWTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzureSQLDWTableDataset() (*AzureSQLDWTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLMITableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzureSQLMITableDataset() (*AzureSQLMITableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureSQLTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzureSQLTableDataset() (*AzureSQLTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureTableDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzureTableDataset() (*AzureTableDataset, bool) {
+ return nil, false
+}
+
+// AsAzureBlobDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAzureBlobDataset() (*AzureBlobDataset, bool) {
+ return nil, false
+}
+
+// AsBinaryDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
+ return nil, false
+}
+
+// AsJSONDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
+// AsDelimitedTextDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
+ return nil, false
+}
+
+// AsParquetDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
+ return nil, false
+}
+
+// AsAvroDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonS3Dataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
+ return nil, false
+}
+
+// AsDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsDataset() (*Dataset, bool) {
+ return nil, false
+}
+
+// AsBasicDataset is the BasicDataset implementation for SybaseTableDataset.
+func (std SybaseTableDataset) AsBasicDataset() (BasicDataset, bool) {
+ return &std, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for SybaseTableDataset struct.
+func (std *SybaseTableDataset) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "typeProperties":
+ if v != nil {
+ var sybaseTableDatasetTypeProperties SybaseTableDatasetTypeProperties
+ err = json.Unmarshal(*v, &sybaseTableDatasetTypeProperties)
+ if err != nil {
+ return err
+ }
+ std.SybaseTableDatasetTypeProperties = &sybaseTableDatasetTypeProperties
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if std.AdditionalProperties == nil {
+ std.AdditionalProperties = make(map[string]interface{})
+ }
+ std.AdditionalProperties[k] = additionalProperties
+ }
+ case "description":
+ if v != nil {
+ var description string
+ err = json.Unmarshal(*v, &description)
+ if err != nil {
+ return err
+ }
+ std.Description = &description
+ }
+ case "structure":
+ if v != nil {
+ var structure interface{}
+ err = json.Unmarshal(*v, &structure)
+ if err != nil {
+ return err
+ }
+ std.Structure = structure
+ }
+ case "schema":
+ if v != nil {
+ var schema interface{}
+ err = json.Unmarshal(*v, &schema)
+ if err != nil {
+ return err
+ }
+ std.Schema = schema
+ }
+ case "linkedServiceName":
+ if v != nil {
+ var linkedServiceName LinkedServiceReference
+ err = json.Unmarshal(*v, &linkedServiceName)
+ if err != nil {
+ return err
+ }
+ std.LinkedServiceName = &linkedServiceName
+ }
+ case "parameters":
+ if v != nil {
+ var parameters map[string]*ParameterSpecification
+ err = json.Unmarshal(*v, ¶meters)
+ if err != nil {
+ return err
+ }
+ std.Parameters = parameters
+ }
+ case "annotations":
+ if v != nil {
+ var annotations []interface{}
+ err = json.Unmarshal(*v, &annotations)
+ if err != nil {
+ return err
+ }
+ std.Annotations = &annotations
+ }
+ case "folder":
+ if v != nil {
+ var folder DatasetFolder
+ err = json.Unmarshal(*v, &folder)
+ if err != nil {
+ return err
+ }
+ std.Folder = &folder
+ }
+ case "type":
+ if v != nil {
+ var typeVar TypeBasicDataset
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ std.Type = typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// SybaseTableDatasetTypeProperties sybase table dataset properties.
+type SybaseTableDatasetTypeProperties struct {
+ // TableName - The Sybase table name. Type: string (or Expression with resultType string).
+ TableName interface{} `json:"tableName,omitempty"`
+}
+
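
`SybaseTableDataset` above carries its table name in the embedded `SybaseTableDatasetTypeProperties`, and its `MarshalJSON` stamps the `TypeSybaseTable` discriminator before encoding. A minimal construction sketch, assuming the same import path as in the earlier sketches; the description and the `dbo.Orders` table name are hypothetical values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

func main() {
	desc := "Orders table exposed through the Sybase connector" // hypothetical description
	ds := datafactory.SybaseTableDataset{
		Description: &desc,
		SybaseTableDatasetTypeProperties: &datafactory.SybaseTableDatasetTypeProperties{
			// TableName accepts a string or an Expression with resultType string.
			TableName: "dbo.Orders", // hypothetical table name
		},
	}

	// MarshalJSON sets the "type" discriminator (TypeSybaseTable) and nests the
	// table name under "typeProperties".
	payload, err := json.Marshal(ds)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```
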
// TeradataLinkedService linked service for Teradata data source.
type TeradataLinkedService struct {
// TeradataLinkedServiceTypeProperties - Teradata linked service properties.
@@ -177420,7 +191342,7 @@ type TeradataLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -177572,6 +191494,11 @@ func (TLSVar TeradataLinkedService) AsMarketoLinkedService() (*MarketoLinkedServ
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for TeradataLinkedService.
+func (TLSVar TeradataLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for TeradataLinkedService.
func (TLSVar TeradataLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -178103,7 +192030,7 @@ type TeradataSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -178233,6 +192160,11 @@ func (ts TeradataSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for TeradataSource.
+func (ts TeradataSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for TeradataSource.
func (ts TeradataSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -178453,6 +192385,11 @@ func (ts TeradataSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for TeradataSource.
+func (ts TeradataSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for TeradataSource.
func (ts TeradataSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -178528,6 +192465,11 @@ func (ts TeradataSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for TeradataSource.
+func (ts TeradataSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for TeradataSource.
func (ts TeradataSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -178538,6 +192480,11 @@ func (ts TeradataSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for TeradataSource.
+func (ts TeradataSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for TeradataSource.
func (ts TeradataSource) AsCopySource() (*CopySource, bool) {
return nil, false
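The As* methods being added in these hunks are the generated type-assertion helpers of the polymorphic BasicCopySource interface: every concrete source implements the whole set and returns (nil, false) for every type other than its own. Below is a minimal sketch of how calling code can use them to recognise the newly added Avro, JSON and Azure MariaDB sources; the package name `example`, the helper name `classifySource`, and the import path (the usual location of the 2018-06-01 datafactory package) are assumptions for illustration only.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// classifySource inspects a polymorphic copy source via the generated As*
// helpers and reports which of the newly added concrete types it is.
func classifySource(src datafactory.BasicCopySource) string {
	if _, ok := src.AsAvroSource(); ok {
		return "avro"
	}
	if _, ok := src.AsJSONSource(); ok {
		return "json"
	}
	if _, ok := src.AsAzureMariaDBSource(); ok {
		return "azure mariadb"
	}
	return "other"
}
```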
@@ -178658,7 +192605,7 @@ type TeradataTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -178794,6 +192741,11 @@ func (ttd TeradataTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for TeradataTableDataset.
+func (ttd TeradataTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for TeradataTableDataset.
func (ttd TeradataTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -178919,6 +192871,16 @@ func (ttd TeradataTableDataset) AsSapCloudForCustomerResourceDataset() (*SapClou
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for TeradataTableDataset.
+func (ttd TeradataTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for TeradataTableDataset.
+func (ttd TeradataTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for TeradataTableDataset.
func (ttd TeradataTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -178959,6 +192921,16 @@ func (ttd TeradataTableDataset) AsRelationalTableDataset() (*RelationalTableData
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for TeradataTableDataset.
+func (ttd TeradataTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for TeradataTableDataset.
+func (ttd TeradataTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for TeradataTableDataset.
func (ttd TeradataTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -179074,6 +193046,11 @@ func (ttd TeradataTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for TeradataTableDataset.
+func (ttd TeradataTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for TeradataTableDataset.
func (ttd TeradataTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -179084,6 +193061,11 @@ func (ttd TeradataTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for TeradataTableDataset.
+func (ttd TeradataTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for TeradataTableDataset.
func (ttd TeradataTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -179479,7 +193461,7 @@ type Trigger struct {
AdditionalProperties map[string]interface{} `json:""`
// Description - Trigger description.
Description *string `json:"description,omitempty"`
- // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'Started', 'Stopped', 'Disabled'
+ // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'TriggerRuntimeStateStarted', 'TriggerRuntimeStateStopped', 'TriggerRuntimeStateDisabled'
RuntimeState TriggerRuntimeState `json:"runtimeState,omitempty"`
// Annotations - List of tags that can be used for describing the trigger.
Annotations *[]interface{} `json:"annotations,omitempty"`
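The RuntimeState comment change above reflects the breaking rename for this enum: the constants now carry a TriggerRuntimeState prefix (previously 'Started', 'Stopped', 'Disabled'). A short sketch of checking a trigger's state with the renamed constants follows; the package name `example`, the helper name `reportRuntimeState`, and the import path are assumptions.

```go
package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// reportRuntimeState prints a trigger's runtime state using the renamed
// TriggerRuntimeState constants.
func reportRuntimeState(t datafactory.Trigger) {
	switch t.RuntimeState {
	case datafactory.TriggerRuntimeStateStarted:
		fmt.Println("trigger is running")
	case datafactory.TriggerRuntimeStateStopped:
		fmt.Println("trigger is stopped")
	case datafactory.TriggerRuntimeStateDisabled:
		fmt.Println("trigger is disabled")
	default:
		fmt.Println("runtime state not reported")
	}
}
```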
@@ -180197,6 +194179,73 @@ func (future *TriggersStopFuture) Result(client TriggersClient) (ar autorest.Res
return
}
+// TriggersSubscribeToEventsFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type TriggersSubscribeToEventsFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *TriggersSubscribeToEventsFuture) Result(client TriggersClient) (tsos TriggerSubscriptionOperationStatus, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggersSubscribeToEventsFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("datafactory.TriggersSubscribeToEventsFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if tsos.Response.Response, err = future.GetResult(sender); err == nil && tsos.Response.Response.StatusCode != http.StatusNoContent {
+ tsos, err = client.SubscribeToEventsResponder(tsos.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggersSubscribeToEventsFuture", "Result", tsos.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// TriggerSubscriptionOperationStatus defines the response of a trigger subscription operation.
+type TriggerSubscriptionOperationStatus struct {
+ autorest.Response `json:"-"`
+ // TriggerName - READ-ONLY; Trigger name.
+ TriggerName *string `json:"triggerName,omitempty"`
+ // Status - READ-ONLY; Event Subscription Status. Possible values include: 'Enabled', 'Provisioning', 'Deprovisioning', 'Disabled', 'Unknown'
+ Status EventSubscriptionStatus `json:"status,omitempty"`
+}
+
+// TriggersUnsubscribeFromEventsFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type TriggersUnsubscribeFromEventsFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *TriggersUnsubscribeFromEventsFuture) Result(client TriggersClient) (tsos TriggerSubscriptionOperationStatus, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggersUnsubscribeFromEventsFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("datafactory.TriggersUnsubscribeFromEventsFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if tsos.Response.Response, err = future.GetResult(sender); err == nil && tsos.Response.Response.StatusCode != http.StatusNoContent {
+ tsos, err = client.UnsubscribeFromEventsResponder(tsos.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggersUnsubscribeFromEventsFuture", "Result", tsos.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
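The two futures above back the new trigger event-subscription long-running operations; each Result call decodes a TriggerSubscriptionOperationStatus whose Status is one of the EventSubscriptionStatus values listed in the struct comment. A minimal sketch of driving the subscribe call end to end is below; the TriggersClient method name SubscribeToEvents and its parameters follow the usual generated-client pattern and are not shown in this diff, and the package name `example` and helper name `subscribeAndWait` are illustrative.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// subscribeAndWait starts the subscribe-to-events operation, blocks until the
// service reports it complete, and returns the final subscription status.
func subscribeAndWait(ctx context.Context, client datafactory.TriggersClient, resourceGroup, factory, trigger string) (datafactory.TriggerSubscriptionOperationStatus, error) {
	future, err := client.SubscribeToEvents(ctx, resourceGroup, factory, trigger)
	if err != nil {
		return datafactory.TriggerSubscriptionOperationStatus{}, err
	}
	// azure.Future's polling loop; client.Client is the embedded autorest.Client.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return datafactory.TriggerSubscriptionOperationStatus{}, err
	}
	// Decode the final TriggerSubscriptionOperationStatus payload.
	return future.Result(client)
}
```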
// TumblingWindowTrigger trigger that schedules pipeline runs for all fixed time interval windows from a
// start time without gaps and also supports backfill scenarios (when start time is in the past).
type TumblingWindowTrigger struct {
@@ -180208,7 +194257,7 @@ type TumblingWindowTrigger struct {
AdditionalProperties map[string]interface{} `json:""`
// Description - Trigger description.
Description *string `json:"description,omitempty"`
- // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'Started', 'Stopped', 'Disabled'
+ // RuntimeState - READ-ONLY; Indicates if trigger is running or not. Updated when Start/Stop APIs are called on the Trigger. Possible values include: 'TriggerRuntimeStateStarted', 'TriggerRuntimeStateStopped', 'TriggerRuntimeStateDisabled'
RuntimeState TriggerRuntimeState `json:"runtimeState,omitempty"`
// Annotations - List of tags that can be used for describing the trigger.
Annotations *[]interface{} `json:"annotations,omitempty"`
@@ -181268,6 +195317,16 @@ type VariableSpecification struct {
DefaultValue interface{} `json:"defaultValue,omitempty"`
}
+// VerticaDatasetTypeProperties vertica dataset properties.

+type VerticaDatasetTypeProperties struct {
+ // TableName - This property will be retired. Please consider using schema + table properties instead.
+ TableName interface{} `json:"tableName,omitempty"`
+ // Table - The table name of the Vertica database. Type: string (or Expression with resultType string).
+ Table interface{} `json:"table,omitempty"`
+ // Schema - The schema name of the Vertica database. Type: string (or Expression with resultType string).
+ Schema interface{} `json:"schema,omitempty"`
+}
+
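The struct added above is the typed replacement for the generic properties bag on Vertica datasets; the VerticaTableDataset hunks further down in this diff swap GenericDatasetTypeProperties for it. A minimal sketch of populating it follows, with a hypothetical schema and table name; the package name `example`, helper name `newVerticaTableDataset`, and import path are assumptions.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// newVerticaTableDataset builds a Vertica dataset using the new typed
// properties; MarshalJSON sets the discriminator to TypeVerticaTable itself.
func newVerticaTableDataset() datafactory.VerticaTableDataset {
	return datafactory.VerticaTableDataset{
		VerticaDatasetTypeProperties: &datafactory.VerticaDatasetTypeProperties{
			Schema: "public",      // hypothetical schema name
			Table:  "sales_facts", // hypothetical table name
			// TableName is left unset: this diff marks it as being retired
			// in favour of the schema + table pair.
		},
	}
}
```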
// VerticaLinkedService vertica linked service.
type VerticaLinkedService struct {
// VerticaLinkedServiceTypeProperties - Vertica linked service properties.
@@ -181282,7 +195341,7 @@ type VerticaLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -181434,6 +195493,11 @@ func (vls VerticaLinkedService) AsMarketoLinkedService() (*MarketoLinkedService,
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for VerticaLinkedService.
+func (vls VerticaLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for VerticaLinkedService.
func (vls VerticaLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -181877,7 +195941,7 @@ type VerticaSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -182001,6 +196065,11 @@ func (vs VerticaSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for VerticaSource.
+func (vs VerticaSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for VerticaSource.
func (vs VerticaSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -182221,6 +196290,11 @@ func (vs VerticaSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for VerticaSource.
+func (vs VerticaSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for VerticaSource.
func (vs VerticaSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -182296,6 +196370,11 @@ func (vs VerticaSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for VerticaSource.
+func (vs VerticaSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for VerticaSource.
func (vs VerticaSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -182306,6 +196385,11 @@ func (vs VerticaSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for VerticaSource.
+func (vs VerticaSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for VerticaSource.
func (vs VerticaSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -182390,8 +196474,8 @@ func (vs *VerticaSource) UnmarshalJSON(body []byte) error {
// VerticaTableDataset vertica dataset.
type VerticaTableDataset struct {
- // GenericDatasetTypeProperties - Properties specific to this dataset type.
- *GenericDatasetTypeProperties `json:"typeProperties,omitempty"`
+ // VerticaDatasetTypeProperties - Properties specific to this dataset type.
+ *VerticaDatasetTypeProperties `json:"typeProperties,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
AdditionalProperties map[string]interface{} `json:""`
// Description - Dataset description.
@@ -182408,7 +196492,7 @@ type VerticaTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -182416,8 +196500,8 @@ type VerticaTableDataset struct {
func (vtd VerticaTableDataset) MarshalJSON() ([]byte, error) {
vtd.Type = TypeVerticaTable
objectMap := make(map[string]interface{})
- if vtd.GenericDatasetTypeProperties != nil {
- objectMap["typeProperties"] = vtd.GenericDatasetTypeProperties
+ if vtd.VerticaDatasetTypeProperties != nil {
+ objectMap["typeProperties"] = vtd.VerticaDatasetTypeProperties
}
if vtd.Description != nil {
objectMap["description"] = vtd.Description
@@ -182544,6 +196628,11 @@ func (vtd VerticaTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset,
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for VerticaTableDataset.
+func (vtd VerticaTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for VerticaTableDataset.
func (vtd VerticaTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -182669,6 +196758,16 @@ func (vtd VerticaTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloud
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for VerticaTableDataset.
+func (vtd VerticaTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for VerticaTableDataset.
+func (vtd VerticaTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for VerticaTableDataset.
func (vtd VerticaTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -182709,6 +196808,16 @@ func (vtd VerticaTableDataset) AsRelationalTableDataset() (*RelationalTableDatas
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for VerticaTableDataset.
+func (vtd VerticaTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for VerticaTableDataset.
+func (vtd VerticaTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for VerticaTableDataset.
func (vtd VerticaTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -182824,6 +196933,11 @@ func (vtd VerticaTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for VerticaTableDataset.
+func (vtd VerticaTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for VerticaTableDataset.
func (vtd VerticaTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -182834,6 +196948,11 @@ func (vtd VerticaTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for VerticaTableDataset.
+func (vtd VerticaTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for VerticaTableDataset.
func (vtd VerticaTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -182860,12 +196979,12 @@ func (vtd *VerticaTableDataset) UnmarshalJSON(body []byte) error {
switch k {
case "typeProperties":
if v != nil {
- var genericDatasetTypeProperties GenericDatasetTypeProperties
- err = json.Unmarshal(*v, &genericDatasetTypeProperties)
+ var verticaDatasetTypeProperties VerticaDatasetTypeProperties
+ err = json.Unmarshal(*v, &verticaDatasetTypeProperties)
if err != nil {
return err
}
- vtd.GenericDatasetTypeProperties = &genericDatasetTypeProperties
+ vtd.VerticaDatasetTypeProperties = &verticaDatasetTypeProperties
}
default:
if v != nil {
@@ -184244,7 +198363,7 @@ type WebLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -184394,6 +198513,11 @@ func (wls WebLinkedService) AsMarketoLinkedService() (*MarketoLinkedService, boo
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for WebLinkedService.
+func (wls WebLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for WebLinkedService.
func (wls WebLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -184925,7 +199049,7 @@ type WebSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -185046,6 +199170,11 @@ func (ws WebSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for WebSource.
+func (ws WebSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for WebSource.
func (ws WebSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -185266,6 +199395,11 @@ func (ws WebSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for WebSource.
+func (ws WebSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for WebSource.
func (ws WebSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -185341,6 +199475,11 @@ func (ws WebSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for WebSource.
+func (ws WebSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for WebSource.
func (ws WebSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -185351,6 +199490,11 @@ func (ws WebSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for WebSource.
+func (ws WebSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for WebSource.
func (ws WebSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -185444,7 +199588,7 @@ type WebTableDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -185580,6 +199724,11 @@ func (wtd WebTableDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bool
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for WebTableDataset.
+func (wtd WebTableDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for WebTableDataset.
func (wtd WebTableDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -185705,6 +199854,16 @@ func (wtd WebTableDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudForC
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for WebTableDataset.
+func (wtd WebTableDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for WebTableDataset.
+func (wtd WebTableDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for WebTableDataset.
func (wtd WebTableDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -185745,6 +199904,16 @@ func (wtd WebTableDataset) AsRelationalTableDataset() (*RelationalTableDataset,
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for WebTableDataset.
+func (wtd WebTableDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for WebTableDataset.
+func (wtd WebTableDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for WebTableDataset.
func (wtd WebTableDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -185860,6 +200029,11 @@ func (wtd WebTableDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for WebTableDataset.
+func (wtd WebTableDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for WebTableDataset.
func (wtd WebTableDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -185870,6 +200044,11 @@ func (wtd WebTableDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for WebTableDataset.
+func (wtd WebTableDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for WebTableDataset.
func (wtd WebTableDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -186015,7 +200194,7 @@ type XeroLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -186167,6 +200346,11 @@ func (xls XeroLinkedService) AsMarketoLinkedService() (*MarketoLinkedService, bo
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for XeroLinkedService.
+func (xls XeroLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for XeroLinkedService.
func (xls XeroLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -186703,7 +200887,7 @@ type XeroObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -186839,6 +201023,11 @@ func (xod XeroObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bo
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for XeroObjectDataset.
+func (xod XeroObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for XeroObjectDataset.
func (xod XeroObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -186964,6 +201153,16 @@ func (xod XeroObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudFo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for XeroObjectDataset.
+func (xod XeroObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for XeroObjectDataset.
+func (xod XeroObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for XeroObjectDataset.
func (xod XeroObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -187004,6 +201203,16 @@ func (xod XeroObjectDataset) AsRelationalTableDataset() (*RelationalTableDataset
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for XeroObjectDataset.
+func (xod XeroObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for XeroObjectDataset.
+func (xod XeroObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for XeroObjectDataset.
func (xod XeroObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -187119,6 +201328,11 @@ func (xod XeroObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for XeroObjectDataset.
+func (xod XeroObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for XeroObjectDataset.
func (xod XeroObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -187129,6 +201343,11 @@ func (xod XeroObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for XeroObjectDataset.
+func (xod XeroObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for XeroObjectDataset.
func (xod XeroObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -187264,7 +201483,7 @@ type XeroSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -187388,6 +201607,11 @@ func (xs XeroSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for XeroSource.
+func (xs XeroSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for XeroSource.
func (xs XeroSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -187608,6 +201832,11 @@ func (xs XeroSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for XeroSource.
+func (xs XeroSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for XeroSource.
func (xs XeroSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -187683,6 +201912,11 @@ func (xs XeroSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for XeroSource.
+func (xs XeroSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for XeroSource.
func (xs XeroSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -187693,6 +201927,11 @@ func (xs XeroSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for XeroSource.
+func (xs XeroSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for XeroSource.
func (xs XeroSource) AsCopySource() (*CopySource, bool) {
return nil, false
@@ -187789,7 +202028,7 @@ type ZohoLinkedService struct {
Parameters map[string]*ParameterSpecification `json:"parameters"`
// Annotations - List of tags that can be used for describing the linked service.
Annotations *[]interface{} `json:"annotations,omitempty"`
- // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
+ // Type - Possible values include: 'TypeLinkedService', 'TypeAzureFunction', 'TypeAzureDataExplorer', 'TypeSapTable', 'TypeGoogleAdWords', 'TypeOracleServiceCloud', 'TypeDynamicsAX', 'TypeResponsys', 'TypeAzureDatabricks', 'TypeAzureDataLakeAnalytics', 'TypeHDInsightOnDemand', 'TypeSalesforceMarketingCloud', 'TypeNetezza', 'TypeVertica', 'TypeZoho', 'TypeXero', 'TypeSquare', 'TypeSpark', 'TypeShopify', 'TypeServiceNow', 'TypeQuickBooks', 'TypePresto', 'TypePhoenix', 'TypePaypal', 'TypeMarketo', 'TypeAzureMariaDB', 'TypeMariaDB', 'TypeMagento', 'TypeJira', 'TypeImpala', 'TypeHubspot', 'TypeHive', 'TypeHBase', 'TypeGreenplum', 'TypeGoogleBigQuery', 'TypeEloqua', 'TypeDrill', 'TypeCouchbase', 'TypeConcur', 'TypeAzurePostgreSQL', 'TypeAmazonMWS', 'TypeSapHana', 'TypeSapBW', 'TypeSftp', 'TypeFtpServer', 'TypeHTTPServer', 'TypeAzureSearch', 'TypeCustomDataSource', 'TypeAmazonRedshift', 'TypeAmazonS3', 'TypeRestService', 'TypeSapOpenHub', 'TypeSapEcc', 'TypeSapCloudForCustomer', 'TypeSalesforceServiceCloud', 'TypeSalesforce', 'TypeOffice365', 'TypeAzureBlobFS', 'TypeAzureDataLakeStore', 'TypeCosmosDbMongoDbAPI', 'TypeMongoDbV2', 'TypeMongoDb', 'TypeCassandra', 'TypeWeb', 'TypeOData', 'TypeHdfs', 'TypeMicrosoftAccess', 'TypeInformix', 'TypeOdbc', 'TypeAzureML', 'TypeTeradata', 'TypeDb2', 'TypeSybase', 'TypePostgreSQL', 'TypeMySQL', 'TypeAzureMySQL', 'TypeOracle', 'TypeFileServer', 'TypeHDInsight', 'TypeCommonDataServiceForApps', 'TypeDynamicsCrm', 'TypeDynamics', 'TypeCosmosDb', 'TypeAzureKeyVault', 'TypeAzureBatch', 'TypeAzureSQLMI', 'TypeAzureSQLDatabase', 'TypeSQLServer', 'TypeAzureSQLDW', 'TypeAzureTableStorage', 'TypeAzureBlobStorage', 'TypeAzureStorage'
Type TypeBasicLinkedService `json:"type,omitempty"`
}
@@ -187941,6 +202180,11 @@ func (zls ZohoLinkedService) AsMarketoLinkedService() (*MarketoLinkedService, bo
return nil, false
}
+// AsAzureMariaDBLinkedService is the BasicLinkedService implementation for ZohoLinkedService.
+func (zls ZohoLinkedService) AsAzureMariaDBLinkedService() (*AzureMariaDBLinkedService, bool) {
+ return nil, false
+}
+
// AsMariaDBLinkedService is the BasicLinkedService implementation for ZohoLinkedService.
func (zls ZohoLinkedService) AsMariaDBLinkedService() (*MariaDBLinkedService, bool) {
return nil, false
@@ -188466,7 +202710,7 @@ type ZohoObjectDataset struct {
Annotations *[]interface{} `json:"annotations,omitempty"`
// Folder - The folder that this Dataset is in. If not specified, Dataset will appear at the root level.
Folder *DatasetFolder `json:"folder,omitempty"`
- // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeDelimitedText', 'TypeParquet', 'TypeAmazonS3Object'
+ // Type - Possible values include: 'TypeDataset', 'TypeGoogleAdWordsObject', 'TypeAzureDataExplorerTable', 'TypeOracleServiceCloudObject', 'TypeDynamicsAXResource', 'TypeResponsysObject', 'TypeSalesforceMarketingCloudObject', 'TypeVerticaTable', 'TypeNetezzaTable', 'TypeZohoObject', 'TypeXeroObject', 'TypeSquareObject', 'TypeSparkObject', 'TypeShopifyObject', 'TypeServiceNowObject', 'TypeQuickBooksObject', 'TypePrestoObject', 'TypePhoenixObject', 'TypePaypalObject', 'TypeMarketoObject', 'TypeAzureMariaDBTable', 'TypeMariaDBTable', 'TypeMagentoObject', 'TypeJiraObject', 'TypeImpalaObject', 'TypeHubspotObject', 'TypeHiveObject', 'TypeHBaseObject', 'TypeGreenplumTable', 'TypeGoogleBigQueryObject', 'TypeEloquaObject', 'TypeDrillTable', 'TypeCouchbaseTable', 'TypeConcurObject', 'TypeAzurePostgreSQLTable', 'TypeAmazonMWSObject', 'TypeHTTPFile', 'TypeAzureSearchIndex', 'TypeWebTable', 'TypeSapTableResource', 'TypeRestResource', 'TypeSQLServerTable', 'TypeSapOpenHubTable', 'TypeSapHanaTable', 'TypeSapEccResource', 'TypeSapCloudForCustomerResource', 'TypeSapBwCube', 'TypeSybaseTable', 'TypeSalesforceServiceCloudObject', 'TypeSalesforceObject', 'TypeMicrosoftAccessTable', 'TypePostgreSQLTable', 'TypeMySQLTable', 'TypeOdbcTable', 'TypeInformixTable', 'TypeRelationalTable', 'TypeDb2Table', 'TypeAmazonRedshiftTable', 'TypeAzureMySQLTable', 'TypeTeradataTable', 'TypeOracleTable', 'TypeODataResource', 'TypeCosmosDbMongoDbAPICollection', 'TypeMongoDbV2Collection', 'TypeMongoDbCollection', 'TypeFileShare', 'TypeOffice365Table', 'TypeAzureBlobFSFile', 'TypeAzureDataLakeStoreFile', 'TypeCommonDataServiceForAppsEntity', 'TypeDynamicsCrmEntity', 'TypeDynamicsEntity', 'TypeDocumentDbCollection', 'TypeCustomDataset', 'TypeCassandraTable', 'TypeAzureSQLDWTable', 'TypeAzureSQLMITable', 'TypeAzureSQLTable', 'TypeAzureTable', 'TypeAzureBlob', 'TypeBinary', 'TypeJSON', 'TypeDelimitedText', 'TypeParquet', 'TypeAvro', 'TypeAmazonS3Object'
Type TypeBasicDataset `json:"type,omitempty"`
}
@@ -188602,6 +202846,11 @@ func (zod ZohoObjectDataset) AsMarketoObjectDataset() (*MarketoObjectDataset, bo
return nil, false
}
+// AsAzureMariaDBTableDataset is the BasicDataset implementation for ZohoObjectDataset.
+func (zod ZohoObjectDataset) AsAzureMariaDBTableDataset() (*AzureMariaDBTableDataset, bool) {
+ return nil, false
+}
+
// AsMariaDBTableDataset is the BasicDataset implementation for ZohoObjectDataset.
func (zod ZohoObjectDataset) AsMariaDBTableDataset() (*MariaDBTableDataset, bool) {
return nil, false
@@ -188727,6 +202976,16 @@ func (zod ZohoObjectDataset) AsSapCloudForCustomerResourceDataset() (*SapCloudFo
return nil, false
}
+// AsSapBwCubeDataset is the BasicDataset implementation for ZohoObjectDataset.
+func (zod ZohoObjectDataset) AsSapBwCubeDataset() (*SapBwCubeDataset, bool) {
+ return nil, false
+}
+
+// AsSybaseTableDataset is the BasicDataset implementation for ZohoObjectDataset.
+func (zod ZohoObjectDataset) AsSybaseTableDataset() (*SybaseTableDataset, bool) {
+ return nil, false
+}
+
// AsSalesforceServiceCloudObjectDataset is the BasicDataset implementation for ZohoObjectDataset.
func (zod ZohoObjectDataset) AsSalesforceServiceCloudObjectDataset() (*SalesforceServiceCloudObjectDataset, bool) {
return nil, false
@@ -188767,6 +203026,16 @@ func (zod ZohoObjectDataset) AsRelationalTableDataset() (*RelationalTableDataset
return nil, false
}
+// AsDb2TableDataset is the BasicDataset implementation for ZohoObjectDataset.
+func (zod ZohoObjectDataset) AsDb2TableDataset() (*Db2TableDataset, bool) {
+ return nil, false
+}
+
+// AsAmazonRedshiftTableDataset is the BasicDataset implementation for ZohoObjectDataset.
+func (zod ZohoObjectDataset) AsAmazonRedshiftTableDataset() (*AmazonRedshiftTableDataset, bool) {
+ return nil, false
+}
+
// AsAzureMySQLTableDataset is the BasicDataset implementation for ZohoObjectDataset.
func (zod ZohoObjectDataset) AsAzureMySQLTableDataset() (*AzureMySQLTableDataset, bool) {
return nil, false
@@ -188882,6 +203151,11 @@ func (zod ZohoObjectDataset) AsBinaryDataset() (*BinaryDataset, bool) {
return nil, false
}
+// AsJSONDataset is the BasicDataset implementation for ZohoObjectDataset.
+func (zod ZohoObjectDataset) AsJSONDataset() (*JSONDataset, bool) {
+ return nil, false
+}
+
// AsDelimitedTextDataset is the BasicDataset implementation for ZohoObjectDataset.
func (zod ZohoObjectDataset) AsDelimitedTextDataset() (*DelimitedTextDataset, bool) {
return nil, false
@@ -188892,6 +203166,11 @@ func (zod ZohoObjectDataset) AsParquetDataset() (*ParquetDataset, bool) {
return nil, false
}
+// AsAvroDataset is the BasicDataset implementation for ZohoObjectDataset.
+func (zod ZohoObjectDataset) AsAvroDataset() (*AvroDataset, bool) {
+ return nil, false
+}
+
// AsAmazonS3Dataset is the BasicDataset implementation for ZohoObjectDataset.
func (zod ZohoObjectDataset) AsAmazonS3Dataset() (*AmazonS3Dataset, bool) {
return nil, false
@@ -189027,7 +203306,7 @@ type ZohoSource struct {
SourceRetryWait interface{} `json:"sourceRetryWait,omitempty"`
// MaxConcurrentConnections - The maximum concurrent connection count for the source data store. Type: integer (or Expression with resultType integer).
MaxConcurrentConnections interface{} `json:"maxConcurrentConnections,omitempty"`
- // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeDelimitedTextSource', 'TypeParquetSource'
+ // Type - Possible values include: 'TypeCopySource', 'TypeAmazonRedshiftSource', 'TypeGoogleAdWordsSource', 'TypeOracleServiceCloudSource', 'TypeDynamicsAXSource', 'TypeResponsysSource', 'TypeSalesforceMarketingCloudSource', 'TypeVerticaSource', 'TypeNetezzaSource', 'TypeZohoSource', 'TypeXeroSource', 'TypeSquareSource', 'TypeSparkSource', 'TypeShopifySource', 'TypeServiceNowSource', 'TypeQuickBooksSource', 'TypePrestoSource', 'TypePhoenixSource', 'TypePaypalSource', 'TypeMarketoSource', 'TypeAzureMariaDBSource', 'TypeMariaDBSource', 'TypeMagentoSource', 'TypeJiraSource', 'TypeImpalaSource', 'TypeHubspotSource', 'TypeHiveSource', 'TypeHBaseSource', 'TypeGreenplumSource', 'TypeGoogleBigQuerySource', 'TypeEloquaSource', 'TypeDrillSource', 'TypeCouchbaseSource', 'TypeConcurSource', 'TypeAzurePostgreSQLSource', 'TypeAmazonMWSSource', 'TypeHTTPSource', 'TypeAzureBlobFSSource', 'TypeAzureDataLakeStoreSource', 'TypeOffice365Source', 'TypeCosmosDbMongoDbAPISource', 'TypeMongoDbV2Source', 'TypeMongoDbSource', 'TypeCassandraSource', 'TypeWebSource', 'TypeTeradataSource', 'TypeOracleSource', 'TypeAzureDataExplorerSource', 'TypeAzureMySQLSource', 'TypeHdfsSource', 'TypeFileSystemSource', 'TypeSQLDWSource', 'TypeSQLMISource', 'TypeAzureSQLSource', 'TypeSQLServerSource', 'TypeSQLSource', 'TypeRestSource', 'TypeSapTableSource', 'TypeSapOpenHubSource', 'TypeSapHanaSource', 'TypeSapEccSource', 'TypeSapCloudForCustomerSource', 'TypeSalesforceServiceCloudSource', 'TypeSalesforceSource', 'TypeODataSource', 'TypeSapBwSource', 'TypeSybaseSource', 'TypePostgreSQLSource', 'TypeMySQLSource', 'TypeOdbcSource', 'TypeDb2Source', 'TypeMicrosoftAccessSource', 'TypeInformixSource', 'TypeRelationalSource', 'TypeCommonDataServiceForAppsSource', 'TypeDynamicsCrmSource', 'TypeDynamicsSource', 'TypeDocumentDbCollectionSource', 'TypeBlobSource', 'TypeAzureTableSource', 'TypeBinarySource', 'TypeJSONSource', 'TypeDelimitedTextSource', 'TypeParquetSource', 'TypeAvroSource'
Type TypeBasicCopySource `json:"type,omitempty"`
}
@@ -189151,6 +203430,11 @@ func (zs ZohoSource) AsMarketoSource() (*MarketoSource, bool) {
return nil, false
}
+// AsAzureMariaDBSource is the BasicCopySource implementation for ZohoSource.
+func (zs ZohoSource) AsAzureMariaDBSource() (*AzureMariaDBSource, bool) {
+ return nil, false
+}
+
// AsMariaDBSource is the BasicCopySource implementation for ZohoSource.
func (zs ZohoSource) AsMariaDBSource() (*MariaDBSource, bool) {
return nil, false
@@ -189371,6 +203655,11 @@ func (zs ZohoSource) AsODataSource() (*ODataSource, bool) {
return nil, false
}
+// AsSapBwSource is the BasicCopySource implementation for ZohoSource.
+func (zs ZohoSource) AsSapBwSource() (*SapBwSource, bool) {
+ return nil, false
+}
+
// AsSybaseSource is the BasicCopySource implementation for ZohoSource.
func (zs ZohoSource) AsSybaseSource() (*SybaseSource, bool) {
return nil, false
@@ -189446,6 +203735,11 @@ func (zs ZohoSource) AsBinarySource() (*BinarySource, bool) {
return nil, false
}
+// AsJSONSource is the BasicCopySource implementation for ZohoSource.
+func (zs ZohoSource) AsJSONSource() (*JSONSource, bool) {
+ return nil, false
+}
+
// AsDelimitedTextSource is the BasicCopySource implementation for ZohoSource.
func (zs ZohoSource) AsDelimitedTextSource() (*DelimitedTextSource, bool) {
return nil, false
@@ -189456,6 +203750,11 @@ func (zs ZohoSource) AsParquetSource() (*ParquetSource, bool) {
return nil, false
}
+// AsAvroSource is the BasicCopySource implementation for ZohoSource.
+func (zs ZohoSource) AsAvroSource() (*AvroSource, bool) {
+ return nil, false
+}
+
// AsCopySource is the BasicCopySource implementation for ZohoSource.
func (zs ZohoSource) AsCopySource() (*CopySource, bool) {
return nil, false
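The generated stubs above round out the datafactory polymorphic helpers for this release: every concrete linked service, dataset, and copy source gains As* conversions for the newly added AzureMariaDB, SapBwCube, SybaseTable, Db2Table, AmazonRedshiftTable, JSON, and Avro variants, each returning (nil, false) unless the receiver actually is that variant. Below is a minimal, hedged sketch of how a caller might use these helpers; it assumes the BasicDataset interface exposes the new methods (as the generated implementations imply), and everything except the SDK import path is illustrative.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
)

// printIfMariaDB inspects a polymorphic dataset and reports whether it is one
// of the MariaDB-backed dataset types. Every other concrete type (Xero, Zoho,
// ...) answers (nil, false) from these helpers, as the stubs above show.
func printIfMariaDB(ds datafactory.BasicDataset) {
	if d, ok := ds.AsAzureMariaDBTableDataset(); ok {
		fmt.Println("Azure Database for MariaDB table dataset:", d)
		return
	}
	if d, ok := ds.AsMariaDBTableDataset(); ok {
		fmt.Println("MariaDB table dataset:", d)
		return
	}
	fmt.Println("not a MariaDB dataset")
}

func main() {
	// A Xero dataset answers false to both MariaDB helpers.
	printIfMariaDB(datafactory.XeroObjectDataset{})
}
```

The same pattern applies to BasicLinkedService and BasicCopySource values and their new As* helpers.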
diff --git a/services/datafactory/mgmt/2018-06-01/datafactory/triggerruns.go b/services/datafactory/mgmt/2018-06-01/datafactory/triggerruns.go
index a77a9d36a8ea..266c3fe6ff9f 100644
--- a/services/datafactory/mgmt/2018-06-01/datafactory/triggerruns.go
+++ b/services/datafactory/mgmt/2018-06-01/datafactory/triggerruns.go
@@ -136,3 +136,99 @@ func (client TriggerRunsClient) QueryByFactoryResponder(resp *http.Response) (re
result.Response = autorest.Response{Response: resp}
return
}
+
+// Rerun reruns a single trigger instance by runId.
+// Parameters:
+// resourceGroupName - the resource group name.
+// factoryName - the factory name.
+// triggerName - the trigger name.
+// runID - the pipeline run identifier.
+func (client TriggerRunsClient) Rerun(ctx context.Context, resourceGroupName string, factoryName string, triggerName string, runID string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TriggerRunsClient.Rerun")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: factoryName,
+ Constraints: []validation.Constraint{{Target: "factoryName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "factoryName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "factoryName", Name: validation.Pattern, Rule: `^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$`, Chain: nil}}},
+ {TargetValue: triggerName,
+ Constraints: []validation.Constraint{{Target: "triggerName", Name: validation.MaxLength, Rule: 260, Chain: nil},
+ {Target: "triggerName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "triggerName", Name: validation.Pattern, Rule: `^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("datafactory.TriggerRunsClient", "Rerun", err.Error())
+ }
+
+ req, err := client.RerunPreparer(ctx, resourceGroupName, factoryName, triggerName, runID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggerRunsClient", "Rerun", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RerunSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "datafactory.TriggerRunsClient", "Rerun", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RerunResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggerRunsClient", "Rerun", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// RerunPreparer prepares the Rerun request.
+func (client TriggerRunsClient) RerunPreparer(ctx context.Context, resourceGroupName string, factoryName string, triggerName string, runID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "factoryName": autorest.Encode("path", factoryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "runId": autorest.Encode("path", runID),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "triggerName": autorest.Encode("path", triggerName),
+ }
+
+ const APIVersion = "2018-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/triggers/{triggerName}/triggerRuns/{runId}/rerun", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RerunSender sends the Rerun request. The method will close the
+// http.Response Body if it receives an error.
+func (client TriggerRunsClient) RerunSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// RerunResponder handles the response to the Rerun request. The method always
+// closes the http.Response Body.
+func (client TriggerRunsClient) RerunResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
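A hedged usage sketch for the new TriggerRunsClient.Rerun operation shown above. It assumes the package's usual generated constructor NewTriggerRunsClient and an environment-based authorizer from go-autorest; the subscription, resource group, factory, trigger, and run identifiers below are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Hypothetical identifiers; substitute real values.
	const (
		subscriptionID = "00000000-0000-0000-0000-000000000000"
		resourceGroup  = "my-rg"
		factoryName    = "my-factory"
		triggerName    = "my-trigger"
		runID          = "00000000-0000-0000-0000-000000000001"
	)

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatalf("auth: %v", err)
	}

	client := datafactory.NewTriggerRunsClient(subscriptionID)
	client.Authorizer = authorizer

	// Rerun issues POST .../triggers/{triggerName}/triggerRuns/{runId}/rerun
	// and returns the raw autorest.Response; only 200 OK is treated as success.
	resp, err := client.Rerun(context.Background(), resourceGroup, factoryName, triggerName, runID)
	if err != nil {
		log.Fatalf("rerun: %v", err)
	}
	log.Printf("rerun accepted, status %d", resp.StatusCode)
}
```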
diff --git a/services/datafactory/mgmt/2018-06-01/datafactory/triggers.go b/services/datafactory/mgmt/2018-06-01/datafactory/triggers.go
index cc6fc803c787..ace9d7cfbbce 100644
--- a/services/datafactory/mgmt/2018-06-01/datafactory/triggers.go
+++ b/services/datafactory/mgmt/2018-06-01/datafactory/triggers.go
@@ -343,6 +343,101 @@ func (client TriggersClient) GetResponder(resp *http.Response) (result TriggerRe
return
}
+// GetEventSubscriptionStatus gets a trigger's event subscription status.
+// Parameters:
+// resourceGroupName - the resource group name.
+// factoryName - the factory name.
+// triggerName - the trigger name.
+func (client TriggersClient) GetEventSubscriptionStatus(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (result TriggerSubscriptionOperationStatus, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TriggersClient.GetEventSubscriptionStatus")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: factoryName,
+ Constraints: []validation.Constraint{{Target: "factoryName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "factoryName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "factoryName", Name: validation.Pattern, Rule: `^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$`, Chain: nil}}},
+ {TargetValue: triggerName,
+ Constraints: []validation.Constraint{{Target: "triggerName", Name: validation.MaxLength, Rule: 260, Chain: nil},
+ {Target: "triggerName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "triggerName", Name: validation.Pattern, Rule: `^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("datafactory.TriggersClient", "GetEventSubscriptionStatus", err.Error())
+ }
+
+ req, err := client.GetEventSubscriptionStatusPreparer(ctx, resourceGroupName, factoryName, triggerName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggersClient", "GetEventSubscriptionStatus", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetEventSubscriptionStatusSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "datafactory.TriggersClient", "GetEventSubscriptionStatus", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetEventSubscriptionStatusResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggersClient", "GetEventSubscriptionStatus", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetEventSubscriptionStatusPreparer prepares the GetEventSubscriptionStatus request.
+func (client TriggersClient) GetEventSubscriptionStatusPreparer(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "factoryName": autorest.Encode("path", factoryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "triggerName": autorest.Encode("path", triggerName),
+ }
+
+ const APIVersion = "2018-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/triggers/{triggerName}/getEventSubscriptionStatus", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetEventSubscriptionStatusSender sends the GetEventSubscriptionStatus request. The method will close the
+// http.Response Body if it receives an error.
+func (client TriggersClient) GetEventSubscriptionStatusSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetEventSubscriptionStatusResponder handles the response to the GetEventSubscriptionStatus request. The method always
+// closes the http.Response Body.
+func (client TriggersClient) GetEventSubscriptionStatusResponder(resp *http.Response) (result TriggerSubscriptionOperationStatus, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// ListByFactory lists triggers.
// Parameters:
// resourceGroupName - the resource group name.
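A short, hedged sketch of calling the new GetEventSubscriptionStatus operation. All identifiers are placeholders, and the result is printed verbatim rather than assuming particular fields on TriggerSubscriptionOperationStatus.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical names; substitute your own.
	client := datafactory.NewTriggersClient("00000000-0000-0000-0000-000000000000")
	client.Authorizer = authorizer

	// The call maps to POST .../triggers/{triggerName}/getEventSubscriptionStatus.
	status, err := client.GetEventSubscriptionStatus(context.Background(), "my-rg", "my-factory", "my-event-trigger")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", status)
}
```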
@@ -657,3 +752,193 @@ func (client TriggersClient) StopResponder(resp *http.Response) (result autorest
result.Response = resp
return
}
+
+// SubscribeToEvents subscribes the event trigger to events.
+// Parameters:
+// resourceGroupName - the resource group name.
+// factoryName - the factory name.
+// triggerName - the trigger name.
+func (client TriggersClient) SubscribeToEvents(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (result TriggersSubscribeToEventsFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TriggersClient.SubscribeToEvents")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: factoryName,
+ Constraints: []validation.Constraint{{Target: "factoryName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "factoryName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "factoryName", Name: validation.Pattern, Rule: `^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$`, Chain: nil}}},
+ {TargetValue: triggerName,
+ Constraints: []validation.Constraint{{Target: "triggerName", Name: validation.MaxLength, Rule: 260, Chain: nil},
+ {Target: "triggerName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "triggerName", Name: validation.Pattern, Rule: `^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("datafactory.TriggersClient", "SubscribeToEvents", err.Error())
+ }
+
+ req, err := client.SubscribeToEventsPreparer(ctx, resourceGroupName, factoryName, triggerName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggersClient", "SubscribeToEvents", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.SubscribeToEventsSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggersClient", "SubscribeToEvents", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// SubscribeToEventsPreparer prepares the SubscribeToEvents request.
+func (client TriggersClient) SubscribeToEventsPreparer(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "factoryName": autorest.Encode("path", factoryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "triggerName": autorest.Encode("path", triggerName),
+ }
+
+ const APIVersion = "2018-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/triggers/{triggerName}/subscribeToEvents", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SubscribeToEventsSender sends the SubscribeToEvents request. The method will close the
+// http.Response Body if it receives an error.
+func (client TriggersClient) SubscribeToEventsSender(req *http.Request) (future TriggersSubscribeToEventsFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// SubscribeToEventsResponder handles the response to the SubscribeToEvents request. The method always
+// closes the http.Response Body.
+func (client TriggersClient) SubscribeToEventsResponder(resp *http.Response) (result TriggerSubscriptionOperationStatus, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// UnsubscribeFromEvents unsubscribes the event trigger from events.
+// Parameters:
+// resourceGroupName - the resource group name.
+// factoryName - the factory name.
+// triggerName - the trigger name.
+func (client TriggersClient) UnsubscribeFromEvents(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (result TriggersUnsubscribeFromEventsFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TriggersClient.UnsubscribeFromEvents")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: factoryName,
+ Constraints: []validation.Constraint{{Target: "factoryName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "factoryName", Name: validation.MinLength, Rule: 3, Chain: nil},
+ {Target: "factoryName", Name: validation.Pattern, Rule: `^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$`, Chain: nil}}},
+ {TargetValue: triggerName,
+ Constraints: []validation.Constraint{{Target: "triggerName", Name: validation.MaxLength, Rule: 260, Chain: nil},
+ {Target: "triggerName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "triggerName", Name: validation.Pattern, Rule: `^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("datafactory.TriggersClient", "UnsubscribeFromEvents", err.Error())
+ }
+
+ req, err := client.UnsubscribeFromEventsPreparer(ctx, resourceGroupName, factoryName, triggerName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggersClient", "UnsubscribeFromEvents", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.UnsubscribeFromEventsSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "datafactory.TriggersClient", "UnsubscribeFromEvents", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// UnsubscribeFromEventsPreparer prepares the UnsubscribeFromEvents request.
+func (client TriggersClient) UnsubscribeFromEventsPreparer(ctx context.Context, resourceGroupName string, factoryName string, triggerName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "factoryName": autorest.Encode("path", factoryName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "triggerName": autorest.Encode("path", triggerName),
+ }
+
+ const APIVersion = "2018-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/triggers/{triggerName}/unsubscribeFromEvents", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UnsubscribeFromEventsSender sends the UnsubscribeFromEvents request. The method will close the
+// http.Response Body if it receives an error.
+func (client TriggersClient) UnsubscribeFromEventsSender(req *http.Request) (future TriggersUnsubscribeFromEventsFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// UnsubscribeFromEventsResponder handles the response to the UnsubscribeFromEvents request. The method always
+// closes the http.Response Body.
+func (client TriggersClient) UnsubscribeFromEventsResponder(resp *http.Response) (result TriggerSubscriptionOperationStatus, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
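SubscribeToEvents and UnsubscribeFromEvents are long-running operations: the senders above wrap the 200/202 response in a future. The sketch below assumes the returned futures follow the SDK's usual WaitForCompletionRef/Result pattern and uses placeholder identifiers throughout.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/datafactory/mgmt/2018-06-01/datafactory"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	ctx := context.Background()

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical identifiers; substitute real ones.
	client := datafactory.NewTriggersClient("00000000-0000-0000-0000-000000000000")
	client.Authorizer = authorizer

	// Start the subscription, then poll the future until the service reports
	// a terminal state.
	future, err := client.SubscribeToEvents(ctx, "my-rg", "my-factory", "my-event-trigger")
	if err != nil {
		log.Fatalf("subscribe: %v", err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatalf("waiting for subscription: %v", err)
	}
	status, err := future.Result(client)
	if err != nil {
		log.Fatalf("subscription result: %v", err)
	}
	log.Printf("subscription status: %+v", status)

	// UnsubscribeFromEvents follows the same future-based pattern.
	unsubFuture, err := client.UnsubscribeFromEvents(ctx, "my-rg", "my-factory", "my-event-trigger")
	if err != nil {
		log.Fatalf("unsubscribe: %v", err)
	}
	if err := unsubFuture.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatalf("waiting for unsubscribe: %v", err)
	}
}
```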
diff --git a/services/datamigration/mgmt/2018-04-19/datamigration/models.go b/services/datamigration/mgmt/2018-04-19/datamigration/models.go
index 8bbd8bfc24cd..75375eb2a75c 100644
--- a/services/datamigration/mgmt/2018-04-19/datamigration/models.go
+++ b/services/datamigration/mgmt/2018-04-19/datamigration/models.go
@@ -142,17 +142,17 @@ func PossibleCommandStateValues() []CommandState {
type CommandType string
const (
+ // CommandTypeCommandProperties ...
+ CommandTypeCommandProperties CommandType = "CommandProperties"
// CommandTypeMigrateSQLServerAzureDbSQLMiComplete ...
CommandTypeMigrateSQLServerAzureDbSQLMiComplete CommandType = "Migrate.SqlServer.AzureDbSqlMi.Complete"
// CommandTypeMigrateSyncCompleteDatabase ...
CommandTypeMigrateSyncCompleteDatabase CommandType = "Migrate.Sync.Complete.Database"
- // CommandTypeUnknown ...
- CommandTypeUnknown CommandType = "Unknown"
)
// PossibleCommandTypeValues returns an array of possible values for the CommandType const type.
func PossibleCommandTypeValues() []CommandType {
- return []CommandType{CommandTypeMigrateSQLServerAzureDbSQLMiComplete, CommandTypeMigrateSyncCompleteDatabase, CommandTypeUnknown}
+ return []CommandType{CommandTypeCommandProperties, CommandTypeMigrateSQLServerAzureDbSQLMiComplete, CommandTypeMigrateSyncCompleteDatabase}
}
// DatabaseCompatLevel enumerates the values for database compat level.
@@ -991,8 +991,8 @@ const (
TaskTypeMigrateSQLServerAzureSQLDbSync TaskType = "Migrate.SqlServer.AzureSqlDb.Sync"
// TaskTypeMigrateSQLServerSQLDb ...
TaskTypeMigrateSQLServerSQLDb TaskType = "Migrate.SqlServer.SqlDb"
- // TaskTypeUnknown ...
- TaskTypeUnknown TaskType = "Unknown"
+ // TaskTypeProjectTaskProperties ...
+ TaskTypeProjectTaskProperties TaskType = "ProjectTaskProperties"
// TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI ...
TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI"
// TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS ...
@@ -1003,13 +1003,15 @@ const (
// PossibleTaskTypeValues returns an array of possible values for the TaskType const type.
func PossibleTaskTypeValues() []TaskType {
- return []TaskType{TaskTypeConnectToSourceMySQL, TaskTypeConnectToSourcePostgreSQLSync, TaskTypeConnectToSourceSQLServer, TaskTypeConnectToSourceSQLServerSync, TaskTypeConnectToTargetAzureDbForMySQL, TaskTypeConnectToTargetAzureDbForPostgreSQLSync, TaskTypeConnectToTargetAzureSQLDbMI, TaskTypeConnectToTargetAzureSQLDbMISyncLRS, TaskTypeConnectToTargetSQLDb, TaskTypeConnectToTargetSQLDbSync, TaskTypeGetTDECertificatesSQL, TaskTypeGetUserTablesAzureSQLDbSync, TaskTypeGetUserTablesSQL, TaskTypeMigrateMySQLAzureDbForMySQLSync, TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync, TaskTypeMigrateSQLServerAzureSQLDbMI, TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS, TaskTypeMigrateSQLServerAzureSQLDbSync, TaskTypeMigrateSQLServerSQLDb, TaskTypeUnknown, TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI, TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS, TaskTypeValidateMigrationInputSQLServerSQLDbSync}
+ return []TaskType{TaskTypeConnectToSourceMySQL, TaskTypeConnectToSourcePostgreSQLSync, TaskTypeConnectToSourceSQLServer, TaskTypeConnectToSourceSQLServerSync, TaskTypeConnectToTargetAzureDbForMySQL, TaskTypeConnectToTargetAzureDbForPostgreSQLSync, TaskTypeConnectToTargetAzureSQLDbMI, TaskTypeConnectToTargetAzureSQLDbMISyncLRS, TaskTypeConnectToTargetSQLDb, TaskTypeConnectToTargetSQLDbSync, TaskTypeGetTDECertificatesSQL, TaskTypeGetUserTablesAzureSQLDbSync, TaskTypeGetUserTablesSQL, TaskTypeMigrateMySQLAzureDbForMySQLSync, TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync, TaskTypeMigrateSQLServerAzureSQLDbMI, TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS, TaskTypeMigrateSQLServerAzureSQLDbSync, TaskTypeMigrateSQLServerSQLDb, TaskTypeProjectTaskProperties, TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI, TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS, TaskTypeValidateMigrationInputSQLServerSQLDbSync}
}
// Type enumerates the values for type.
type Type string
const (
+ // TypeConnectionInfo ...
+ TypeConnectionInfo Type = "ConnectionInfo"
// TypeMiSQLConnectionInfo ...
TypeMiSQLConnectionInfo Type = "MiSqlConnectionInfo"
// TypeMySQLConnectionInfo ...
@@ -1018,13 +1020,11 @@ const (
TypePostgreSQLConnectionInfo Type = "PostgreSqlConnectionInfo"
// TypeSQLConnectionInfo ...
TypeSQLConnectionInfo Type = "SqlConnectionInfo"
- // TypeUnknown ...
- TypeUnknown Type = "Unknown"
)
// PossibleTypeValues returns an array of possible values for the Type const type.
func PossibleTypeValues() []Type {
- return []Type{TypeMiSQLConnectionInfo, TypeMySQLConnectionInfo, TypePostgreSQLConnectionInfo, TypeSQLConnectionInfo, TypeUnknown}
+ return []Type{TypeConnectionInfo, TypeMiSQLConnectionInfo, TypeMySQLConnectionInfo, TypePostgreSQLConnectionInfo, TypeSQLConnectionInfo}
}
// UpdateActionType enumerates the values for update action type.
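The datamigration enum changes above are breaking: the *Unknown sentinel values are replaced by constants named after their base types (CommandTypeCommandProperties, TaskTypeProjectTaskProperties, TypeConnectionInfo). A small migration sketch using only the constants introduced in this diff:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/datamigration/mgmt/2018-04-19/datamigration"
)

func main() {
	// Before this release the catch-all discriminators were named *Unknown;
	// code such as
	//
	//     if ci.Type == datamigration.TypeUnknown { ... }
	//
	// no longer compiles and should compare against the renamed constants.
	for _, t := range datamigration.PossibleTypeValues() {
		if t == datamigration.TypeConnectionInfo {
			fmt.Println("base connection-info discriminator:", t)
		}
	}

	// The task and command discriminators follow the same rename.
	fmt.Println(datamigration.TaskTypeProjectTaskProperties)
	fmt.Println(datamigration.CommandTypeCommandProperties)
}
```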
@@ -1176,7 +1176,7 @@ type CommandProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the command. This is ignored if submitted. Possible values include: 'Unknown', 'Accepted', 'Running', 'Succeeded', 'Failed'
State CommandState `json:"state,omitempty"`
- // CommandType - Possible values include: 'CommandTypeUnknown', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase'
+ // CommandType - Possible values include: 'CommandTypeCommandProperties', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase'
CommandType CommandType `json:"commandType,omitempty"`
}
@@ -1223,7 +1223,7 @@ func unmarshalBasicCommandPropertiesArray(body []byte) ([]BasicCommandProperties
// MarshalJSON is the custom marshaler for CommandProperties.
func (cp CommandProperties) MarshalJSON() ([]byte, error) {
- cp.CommandType = CommandTypeUnknown
+ cp.CommandType = CommandTypeCommandProperties
objectMap := make(map[string]interface{})
if cp.CommandType != "" {
objectMap["commandType"] = cp.CommandType
@@ -1266,7 +1266,7 @@ type ConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -1321,7 +1321,7 @@ func unmarshalBasicConnectionInfoArray(body []byte) ([]BasicConnectionInfo, erro
// MarshalJSON is the custom marshaler for ConnectionInfo.
func (ci ConnectionInfo) MarshalJSON() ([]byte, error) {
- ci.Type = TypeUnknown
+ ci.Type = TypeConnectionInfo
objectMap := make(map[string]interface{})
if ci.UserName != nil {
objectMap["userName"] = ci.UserName
@@ -1387,7 +1387,7 @@ type ConnectToSourceMySQLTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -1641,7 +1641,7 @@ type ConnectToSourcePostgreSQLSyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -1859,7 +1859,7 @@ type ConnectToSourceSQLServerSyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -2437,7 +2437,7 @@ type ConnectToSourceSQLServerTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -2678,7 +2678,7 @@ type ConnectToTargetAzureDbForMySQLTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -2920,7 +2920,7 @@ type ConnectToTargetAzureDbForPostgreSQLSyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -3164,7 +3164,7 @@ type ConnectToTargetSQLDbTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -3402,7 +3402,7 @@ type ConnectToTargetSQLMISyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -3644,7 +3644,7 @@ type ConnectToTargetSQLMITaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -3871,7 +3871,7 @@ type ConnectToTargetSQLSQLDbSyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -4340,7 +4340,7 @@ type GetTdeCertificatesSQLTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -4590,7 +4590,7 @@ type GetUserTablesSQLSyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -4832,7 +4832,7 @@ type GetUserTablesSQLTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -5062,7 +5062,7 @@ type MigrateMISyncCompleteCommandProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the command. This is ignored if submitted. Possible values include: 'Unknown', 'Accepted', 'Running', 'Succeeded', 'Failed'
State CommandState `json:"state,omitempty"`
- // CommandType - Possible values include: 'CommandTypeUnknown', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase'
+ // CommandType - Possible values include: 'CommandTypeCommandProperties', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase'
CommandType CommandType `json:"commandType,omitempty"`
}
@@ -5622,7 +5622,7 @@ type MigrateMySQLAzureDbForMySQLSyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -6349,7 +6349,7 @@ type MigratePostgreSQLAzureDbForPostgreSQLSyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -7126,7 +7126,7 @@ type MigrateSQLServerSQLDbSyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -7941,7 +7941,7 @@ type MigrateSQLServerSQLDbTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -8461,7 +8461,7 @@ type MigrateSQLServerSQLMISyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -9158,7 +9158,7 @@ type MigrateSQLServerSQLMITaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -9402,7 +9402,7 @@ type MigrateSyncCompleteCommandProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the command. This is ignored if submitted. Possible values include: 'Unknown', 'Accepted', 'Running', 'Succeeded', 'Failed'
State CommandState `json:"state,omitempty"`
- // CommandType - Possible values include: 'CommandTypeUnknown', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase'
+ // CommandType - Possible values include: 'CommandTypeCommandProperties', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase'
CommandType CommandType `json:"commandType,omitempty"`
}
@@ -9500,7 +9500,7 @@ type MiSQLConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -9563,7 +9563,7 @@ type MySQLConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -9732,7 +9732,7 @@ type PostgreSQLConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -10263,7 +10263,7 @@ type ProjectTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -10390,7 +10390,7 @@ func unmarshalBasicProjectTaskPropertiesArray(body []byte) ([]BasicProjectTaskPr
// MarshalJSON is the custom marshaler for ProjectTaskProperties.
func (ptp ProjectTaskProperties) MarshalJSON() ([]byte, error) {
- ptp.TaskType = TaskTypeUnknown
+ ptp.TaskType = TaskTypeProjectTaskProperties
objectMap := make(map[string]interface{})
if ptp.TaskType != "" {
objectMap["taskType"] = ptp.TaskType
@@ -11837,7 +11837,7 @@ type SQLConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -12135,7 +12135,7 @@ type ValidateMigrationInputSQLServerSQLDbSyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -12381,7 +12381,7 @@ type ValidateMigrationInputSQLServerSQLMISyncTaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -12639,7 +12639,7 @@ type ValidateMigrationInputSQLServerSQLMITaskProperties struct {
State TaskState `json:"state,omitempty"`
// Commands - READ-ONLY; Array of command properties.
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectToSourceMySQL'
TaskType TaskType `json:"taskType,omitempty"`
}
diff --git a/services/kusto/mgmt/2019-01-21/kusto/clusters.go b/services/kusto/mgmt/2019-01-21/kusto/clusters.go
index 5d7c52174942..143194151d97 100644
--- a/services/kusto/mgmt/2019-01-21/kusto/clusters.go
+++ b/services/kusto/mgmt/2019-01-21/kusto/clusters.go
@@ -146,8 +146,7 @@ func (client ClustersClient) CreateOrUpdate(ctx context.Context, resourceGroupNa
}
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
- Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true,
- Chain: []validation.Constraint{{Target: "parameters.Sku.Tier", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("kusto.ClustersClient", "CreateOrUpdate", err.Error())
}
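The nested constraint on parameters.Sku.Tier is dropped because Tier stops being a nillable pointer: in the models.go hunks below it changes from *string to the value-typed AzureSkuTier enum, so only a non-nil Sku is still validated client-side before the request is sent. A construction sketch against the new types follows the AzureSku hunk below.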
diff --git a/services/kusto/mgmt/2019-01-21/kusto/models.go b/services/kusto/mgmt/2019-01-21/kusto/models.go
index 6d4195894c48..db935cbffe82 100644
--- a/services/kusto/mgmt/2019-01-21/kusto/models.go
+++ b/services/kusto/mgmt/2019-01-21/kusto/models.go
@@ -51,25 +51,50 @@ func PossibleAzureScaleTypeValues() []AzureScaleType {
type AzureSkuName string
const (
- // D11V2 ...
- D11V2 AzureSkuName = "D11_v2"
- // D12V2 ...
- D12V2 AzureSkuName = "D12_v2"
- // D13V2 ...
- D13V2 AzureSkuName = "D13_v2"
- // D14V2 ...
- D14V2 AzureSkuName = "D14_v2"
- // L16 ...
- L16 AzureSkuName = "L16"
- // L4 ...
- L4 AzureSkuName = "L4"
- // L8 ...
- L8 AzureSkuName = "L8"
+ // DevNoSLAStandardD11V2 ...
+ DevNoSLAStandardD11V2 AzureSkuName = "Dev(No SLA)_Standard_D11_v2"
+ // StandardD11V2 ...
+ StandardD11V2 AzureSkuName = "Standard_D11_v2"
+ // StandardD12V2 ...
+ StandardD12V2 AzureSkuName = "Standard_D12_v2"
+ // StandardD13V2 ...
+ StandardD13V2 AzureSkuName = "Standard_D13_v2"
+ // StandardD14V2 ...
+ StandardD14V2 AzureSkuName = "Standard_D14_v2"
+ // StandardDS13V21TBPS ...
+ StandardDS13V21TBPS AzureSkuName = "Standard_DS13_v2+1TB_PS"
+ // StandardDS13V22TBPS ...
+ StandardDS13V22TBPS AzureSkuName = "Standard_DS13_v2+2TB_PS"
+ // StandardDS14V23TBPS ...
+ StandardDS14V23TBPS AzureSkuName = "Standard_DS14_v2+3TB_PS"
+ // StandardDS14V24TBPS ...
+ StandardDS14V24TBPS AzureSkuName = "Standard_DS14_v2+4TB_PS"
+ // StandardL16s ...
+ StandardL16s AzureSkuName = "Standard_L16s"
+ // StandardL4s ...
+ StandardL4s AzureSkuName = "Standard_L4s"
+ // StandardL8s ...
+ StandardL8s AzureSkuName = "Standard_L8s"
)
// PossibleAzureSkuNameValues returns an array of possible values for the AzureSkuName const type.
func PossibleAzureSkuNameValues() []AzureSkuName {
- return []AzureSkuName{D11V2, D12V2, D13V2, D14V2, L16, L4, L8}
+ return []AzureSkuName{DevNoSLAStandardD11V2, StandardD11V2, StandardD12V2, StandardD13V2, StandardD14V2, StandardDS13V21TBPS, StandardDS13V22TBPS, StandardDS14V23TBPS, StandardDS14V24TBPS, StandardL16s, StandardL4s, StandardL8s}
+}
+
+// AzureSkuTier enumerates the values for azure sku tier.
+type AzureSkuTier string
+
+const (
+ // Basic ...
+ Basic AzureSkuTier = "Basic"
+ // Standard ...
+ Standard AzureSkuTier = "Standard"
+)
+
+// PossibleAzureSkuTierValues returns an array of possible values for the AzureSkuTier const type.
+func PossibleAzureSkuTierValues() []AzureSkuTier {
+ return []AzureSkuTier{Basic, Standard}
}
// DatabasePrincipalRole enumerates the values for database principal role.
@@ -231,9 +256,9 @@ func PossibleStateValues() []State {
type AzureCapacity struct {
// ScaleType - Scale type. Possible values include: 'Automatic', 'Manual', 'None'
ScaleType AzureScaleType `json:"scaleType,omitempty"`
- // Minimum - Minimum allowed capacity.
+ // Minimum - The minimum allowed number of instances.
Minimum *int32 `json:"minimum,omitempty"`
- // Maximum - Maximum allowed capacity.
+ // Maximum - The maximum allowed number of instances.
Maximum *int32 `json:"maximum,omitempty"`
// Default - The default capacity that would be used.
Default *int32 `json:"default,omitempty"`
@@ -257,18 +282,18 @@ type AzureResourceSku struct {
ResourceType *string `json:"resourceType,omitempty"`
// Sku - The SKU details.
Sku *AzureSku `json:"sku,omitempty"`
- // Capacity - The SKU capacity.
+ // Capacity - The number of instances of the cluster.
Capacity *AzureCapacity `json:"capacity,omitempty"`
}
// AzureSku azure SKU definition.
type AzureSku struct {
- // Name - SKU name. Possible values include: 'D13V2', 'D14V2', 'L8', 'L16', 'D11V2', 'D12V2', 'L4'
+ // Name - SKU name. Possible values include: 'StandardDS13V21TBPS', 'StandardDS13V22TBPS', 'StandardDS14V23TBPS', 'StandardDS14V24TBPS', 'StandardD13V2', 'StandardD14V2', 'StandardL8s', 'StandardL16s', 'StandardD11V2', 'StandardD12V2', 'StandardL4s', 'DevNoSLAStandardD11V2'
Name AzureSkuName `json:"name,omitempty"`
- // Capacity - SKU capacity.
+ // Capacity - The number of instances of the cluster.
Capacity *int32 `json:"capacity,omitempty"`
- // Tier - SKU tier.
- Tier *string `json:"tier,omitempty"`
+ // Tier - SKU tier. Possible values include: 'Basic', 'Standard'
+ Tier AzureSkuTier `json:"tier,omitempty"`
}
// CheckNameResult the result returned from a check name availability request.
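For reference, a minimal sketch of what a cluster SKU looks like against the reshaped kusto surface: the short constants (`D13V2`, `L8`, ...) are replaced by the full Azure SKU names, and `Tier` is now the `AzureSkuTier` enum rather than a `*string`, consistent with the removal of the `Sku.Tier` null check in `clusters.go` above. The literal values below are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/kusto/mgmt/2019-01-21/kusto"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// The short constants (D13V2, L8, ...) were replaced by the full Azure
	// SKU names, and Tier moved from *string to the AzureSkuTier enum.
	sku := kusto.AzureSku{
		Name:     kusto.StandardD13V2,
		Capacity: to.Int32Ptr(2), // number of instances of the cluster
		Tier:     kusto.Standard,
	}
	fmt.Printf("SKU %s (tier %s), %d instances\n", sku.Name, sku.Tier, *sku.Capacity)
}
```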
diff --git a/services/logic/mgmt/2016-06-01/logic/logicapi/interfaces.go b/services/logic/mgmt/2016-06-01/logic/logicapi/interfaces.go
index 8547270a5efd..868667281e2e 100644
--- a/services/logic/mgmt/2016-06-01/logic/logicapi/interfaces.go
+++ b/services/logic/mgmt/2016-06-01/logic/logicapi/interfaces.go
@@ -85,6 +85,7 @@ var _ WorkflowTriggerHistoriesClientAPI = (*logic.WorkflowTriggerHistoriesClient
// WorkflowRunsClientAPI contains the set of methods on the WorkflowRunsClient type.
type WorkflowRunsClientAPI interface {
Cancel(ctx context.Context, resourceGroupName string, workflowName string, runName string) (result autorest.Response, err error)
+ Delete(ctx context.Context, resourceGroupName string, workflowName string, runName string) (result autorest.Response, err error)
Get(ctx context.Context, resourceGroupName string, workflowName string, runName string) (result logic.WorkflowRun, err error)
List(ctx context.Context, resourceGroupName string, workflowName string, top *int32, filter string) (result logic.WorkflowRunListResultPage, err error)
}
@@ -240,3 +241,13 @@ type SessionsClientAPI interface {
}
var _ SessionsClientAPI = (*logic.SessionsClient)(nil)
+
+// RosettaNetProcessConfigurationsClientAPI contains the set of methods on the RosettaNetProcessConfigurationsClient type.
+type RosettaNetProcessConfigurationsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, integrationAccountName string, rosettaNetProcessConfigurationName string, rosettaNetProcessConfiguration logic.IntegrationAccountRosettaNetProcessConfiguration) (result logic.IntegrationAccountRosettaNetProcessConfiguration, err error)
+ Delete(ctx context.Context, resourceGroupName string, integrationAccountName string, rosettaNetProcessConfigurationName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, integrationAccountName string, rosettaNetProcessConfigurationName string) (result logic.IntegrationAccountRosettaNetProcessConfiguration, err error)
+ ListByIntegrationAccounts(ctx context.Context, resourceGroupName string, integrationAccountName string, top *int32, filter string) (result logic.IntegrationAccountRosettaNetProcessConfigurationListResultPage, err error)
+}
+
+var _ RosettaNetProcessConfigurationsClientAPI = (*logic.RosettaNetProcessConfigurationsClient)(nil)
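The `*ClientAPI` interfaces let callers program against method sets rather than concrete clients, which is also the easiest way to fake the new `Delete` method in tests. A brief sketch, assuming placeholder resource names and leaving authorizer setup aside (the `cancelAndDelete` helper is illustrative, not part of the SDK):

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/logic/mgmt/2016-06-01/logic"
	"github.com/Azure/azure-sdk-for-go/services/logic/mgmt/2016-06-01/logic/logicapi"
)

// cancelAndDelete depends only on the interface, so a test can substitute a
// fake implementation of WorkflowRunsClientAPI.
func cancelAndDelete(ctx context.Context, runs logicapi.WorkflowRunsClientAPI, rg, workflow, run string) error {
	if _, err := runs.Cancel(ctx, rg, workflow, run); err != nil {
		return err
	}
	_, err := runs.Delete(ctx, rg, workflow, run) // Delete is new in this release
	return err
}

func main() {
	client := logic.NewWorkflowRunsClient("<subscription-id>")
	// Authorizer setup omitted; assign any autorest.Authorizer to
	// client.Authorizer before making real calls.
	if err := cancelAndDelete(context.Background(), client, "<resource-group>", "<workflow>", "<run>"); err != nil {
		fmt.Println(err)
	}
}
```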
diff --git a/services/logic/mgmt/2016-06-01/logic/models.go b/services/logic/mgmt/2016-06-01/logic/models.go
index 6dc78be1ae7f..e195bbb13a07 100644
--- a/services/logic/mgmt/2016-06-01/logic/models.go
+++ b/services/logic/mgmt/2016-06-01/logic/models.go
@@ -296,6 +296,25 @@ func PossibleMapTypeValues() []MapType {
return []MapType{MapTypeNotSpecified, MapTypeXslt}
}
+// MessageEncoding enumerates the values for message encoding.
+type MessageEncoding string
+
+const (
+ // MessageEncodingBase64 ...
+ MessageEncodingBase64 MessageEncoding = "Base64"
+ // MessageEncodingEightBit ...
+ MessageEncodingEightBit MessageEncoding = "EightBit"
+ // MessageEncodingNotSpecified ...
+ MessageEncodingNotSpecified MessageEncoding = "NotSpecified"
+ // MessageEncodingQuotedPrintable ...
+ MessageEncodingQuotedPrintable MessageEncoding = "QuotedPrintable"
+)
+
+// PossibleMessageEncodingValues returns an array of possible values for the MessageEncoding const type.
+func PossibleMessageEncodingValues() []MessageEncoding {
+ return []MessageEncoding{MessageEncodingBase64, MessageEncodingEightBit, MessageEncodingNotSpecified, MessageEncodingQuotedPrintable}
+}
+
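The generated `Possible*Values` helpers, such as `PossibleMessageEncodingValues` above, are convenient for validating raw input against the enum; a small sketch (the `parseMessageEncoding` helper is illustrative):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/logic/mgmt/2016-06-01/logic"
)

// parseMessageEncoding maps a raw string onto the MessageEncoding enum,
// falling back to MessageEncodingNotSpecified for unknown values.
func parseMessageEncoding(s string) logic.MessageEncoding {
	for _, v := range logic.PossibleMessageEncodingValues() {
		if string(v) == s {
			return v
		}
	}
	return logic.MessageEncodingNotSpecified
}

func main() {
	fmt.Println(parseMessageEncoding("Base64"))  // Base64
	fmt.Println(parseMessageEncoding("unknown")) // NotSpecified
}
```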
// MessageFilterType enumerates the values for message filter type.
type MessageFilterType string
@@ -384,6 +403,103 @@ func PossibleRecurrenceFrequencyValues() []RecurrenceFrequency {
return []RecurrenceFrequency{RecurrenceFrequencyDay, RecurrenceFrequencyHour, RecurrenceFrequencyMinute, RecurrenceFrequencyMonth, RecurrenceFrequencyNotSpecified, RecurrenceFrequencySecond, RecurrenceFrequencyWeek, RecurrenceFrequencyYear}
}
+// RosettaNetActionType enumerates the values for rosetta net action type.
+type RosettaNetActionType string
+
+const (
+ // RosettaNetActionTypeDoubleAction ...
+ RosettaNetActionTypeDoubleAction RosettaNetActionType = "DoubleAction"
+ // RosettaNetActionTypeNotSpecified ...
+ RosettaNetActionTypeNotSpecified RosettaNetActionType = "NotSpecified"
+ // RosettaNetActionTypeSingleAction ...
+ RosettaNetActionTypeSingleAction RosettaNetActionType = "SingleAction"
+)
+
+// PossibleRosettaNetActionTypeValues returns an array of possible values for the RosettaNetActionType const type.
+func PossibleRosettaNetActionTypeValues() []RosettaNetActionType {
+ return []RosettaNetActionType{RosettaNetActionTypeDoubleAction, RosettaNetActionTypeNotSpecified, RosettaNetActionTypeSingleAction}
+}
+
+// RosettaNetPipActivityType enumerates the values for rosetta net pip activity type.
+type RosettaNetPipActivityType string
+
+const (
+ // RosettaNetPipActivityTypeBusinessTransaction ...
+ RosettaNetPipActivityTypeBusinessTransaction RosettaNetPipActivityType = "BusinessTransaction"
+ // RosettaNetPipActivityTypeInformationDistribution ...
+ RosettaNetPipActivityTypeInformationDistribution RosettaNetPipActivityType = "InformationDistribution"
+ // RosettaNetPipActivityTypeNotification ...
+ RosettaNetPipActivityTypeNotification RosettaNetPipActivityType = "Notification"
+ // RosettaNetPipActivityTypeNotSpecified ...
+ RosettaNetPipActivityTypeNotSpecified RosettaNetPipActivityType = "NotSpecified"
+ // RosettaNetPipActivityTypeQueryResponse ...
+ RosettaNetPipActivityTypeQueryResponse RosettaNetPipActivityType = "QueryResponse"
+ // RosettaNetPipActivityTypeRequestConfirm ...
+ RosettaNetPipActivityTypeRequestConfirm RosettaNetPipActivityType = "RequestConfirm"
+ // RosettaNetPipActivityTypeRequestResponse ...
+ RosettaNetPipActivityTypeRequestResponse RosettaNetPipActivityType = "RequestResponse"
+)
+
+// PossibleRosettaNetPipActivityTypeValues returns an array of possible values for the RosettaNetPipActivityType const type.
+func PossibleRosettaNetPipActivityTypeValues() []RosettaNetPipActivityType {
+ return []RosettaNetPipActivityType{RosettaNetPipActivityTypeBusinessTransaction, RosettaNetPipActivityTypeInformationDistribution, RosettaNetPipActivityTypeNotification, RosettaNetPipActivityTypeNotSpecified, RosettaNetPipActivityTypeQueryResponse, RosettaNetPipActivityTypeRequestConfirm, RosettaNetPipActivityTypeRequestResponse}
+}
+
+// RosettaNetPipConfidentialityScope enumerates the values for rosetta net pip confidentiality scope.
+type RosettaNetPipConfidentialityScope string
+
+const (
+ // RosettaNetPipConfidentialityScopeNone ...
+ RosettaNetPipConfidentialityScopeNone RosettaNetPipConfidentialityScope = "None"
+ // RosettaNetPipConfidentialityScopeNotSpecified ...
+ RosettaNetPipConfidentialityScopeNotSpecified RosettaNetPipConfidentialityScope = "NotSpecified"
+ // RosettaNetPipConfidentialityScopePayload ...
+ RosettaNetPipConfidentialityScopePayload RosettaNetPipConfidentialityScope = "Payload"
+ // RosettaNetPipConfidentialityScopePayloadContainer ...
+ RosettaNetPipConfidentialityScopePayloadContainer RosettaNetPipConfidentialityScope = "PayloadContainer"
+)
+
+// PossibleRosettaNetPipConfidentialityScopeValues returns an array of possible values for the RosettaNetPipConfidentialityScope const type.
+func PossibleRosettaNetPipConfidentialityScopeValues() []RosettaNetPipConfidentialityScope {
+ return []RosettaNetPipConfidentialityScope{RosettaNetPipConfidentialityScopeNone, RosettaNetPipConfidentialityScopeNotSpecified, RosettaNetPipConfidentialityScopePayload, RosettaNetPipConfidentialityScopePayloadContainer}
+}
+
+// RosettaNetPipRoleType enumerates the values for rosetta net pip role type.
+type RosettaNetPipRoleType string
+
+const (
+ // RosettaNetPipRoleTypeEmployee ...
+ RosettaNetPipRoleTypeEmployee RosettaNetPipRoleType = "Employee"
+ // RosettaNetPipRoleTypeFunctional ...
+ RosettaNetPipRoleTypeFunctional RosettaNetPipRoleType = "Functional"
+ // RosettaNetPipRoleTypeNotSpecified ...
+ RosettaNetPipRoleTypeNotSpecified RosettaNetPipRoleType = "NotSpecified"
+ // RosettaNetPipRoleTypeOrganizational ...
+ RosettaNetPipRoleTypeOrganizational RosettaNetPipRoleType = "Organizational"
+)
+
+// PossibleRosettaNetPipRoleTypeValues returns an array of possible values for the RosettaNetPipRoleType const type.
+func PossibleRosettaNetPipRoleTypeValues() []RosettaNetPipRoleType {
+ return []RosettaNetPipRoleType{RosettaNetPipRoleTypeEmployee, RosettaNetPipRoleTypeFunctional, RosettaNetPipRoleTypeNotSpecified, RosettaNetPipRoleTypeOrganizational}
+}
+
+// RosettaNetResponseType enumerates the values for rosetta net response type.
+type RosettaNetResponseType string
+
+const (
+ // RosettaNetResponseTypeAsync ...
+ RosettaNetResponseTypeAsync RosettaNetResponseType = "Async"
+ // RosettaNetResponseTypeNotSpecified ...
+ RosettaNetResponseTypeNotSpecified RosettaNetResponseType = "NotSpecified"
+ // RosettaNetResponseTypeSync ...
+ RosettaNetResponseTypeSync RosettaNetResponseType = "Sync"
+)
+
+// PossibleRosettaNetResponseTypeValues returns an array of possible values for the RosettaNetResponseType const type.
+func PossibleRosettaNetResponseTypeValues() []RosettaNetResponseType {
+ return []RosettaNetResponseType{RosettaNetResponseTypeAsync, RosettaNetResponseTypeNotSpecified, RosettaNetResponseTypeSync}
+}
+
// SchemaType enumerates the values for schema type.
type SchemaType string
@@ -2803,6 +2919,312 @@ type IntegrationAccountPartnerProperties struct {
Content *PartnerContent `json:"content,omitempty"`
}
+// IntegrationAccountRosettaNetProcessConfiguration the integration account RosettaNet process
+// configuration.
+type IntegrationAccountRosettaNetProcessConfiguration struct {
+ autorest.Response `json:"-"`
+ // IntegrationAccountRosettaNetProcessConfigurationProperties - The integration account RosettaNet process configuration properties.
+ *IntegrationAccountRosettaNetProcessConfigurationProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; The resource id.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Gets the resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Gets the resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - The resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - The resource tags.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for IntegrationAccountRosettaNetProcessConfiguration.
+func (iarnpc IntegrationAccountRosettaNetProcessConfiguration) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if iarnpc.IntegrationAccountRosettaNetProcessConfigurationProperties != nil {
+ objectMap["properties"] = iarnpc.IntegrationAccountRosettaNetProcessConfigurationProperties
+ }
+ if iarnpc.Location != nil {
+ objectMap["location"] = iarnpc.Location
+ }
+ if iarnpc.Tags != nil {
+ objectMap["tags"] = iarnpc.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for IntegrationAccountRosettaNetProcessConfiguration struct.
+func (iarnpc *IntegrationAccountRosettaNetProcessConfiguration) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var integrationAccountRosettaNetProcessConfigurationProperties IntegrationAccountRosettaNetProcessConfigurationProperties
+ err = json.Unmarshal(*v, &integrationAccountRosettaNetProcessConfigurationProperties)
+ if err != nil {
+ return err
+ }
+ iarnpc.IntegrationAccountRosettaNetProcessConfigurationProperties = &integrationAccountRosettaNetProcessConfigurationProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ iarnpc.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ iarnpc.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ iarnpc.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ iarnpc.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ iarnpc.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// IntegrationAccountRosettaNetProcessConfigurationListResult the list of integration account RosettaNet
+// process configurations.
+type IntegrationAccountRosettaNetProcessConfigurationListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of integration account RosettaNet PIP configurations.
+ Value *[]IntegrationAccountRosettaNetProcessConfiguration `json:"value,omitempty"`
+ // NextLink - The URL to get the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// IntegrationAccountRosettaNetProcessConfigurationListResultIterator provides access to a complete listing
+// of IntegrationAccountRosettaNetProcessConfiguration values.
+type IntegrationAccountRosettaNetProcessConfigurationListResultIterator struct {
+ i int
+ page IntegrationAccountRosettaNetProcessConfigurationListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *IntegrationAccountRosettaNetProcessConfigurationListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IntegrationAccountRosettaNetProcessConfigurationListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *IntegrationAccountRosettaNetProcessConfigurationListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter IntegrationAccountRosettaNetProcessConfigurationListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter IntegrationAccountRosettaNetProcessConfigurationListResultIterator) Response() IntegrationAccountRosettaNetProcessConfigurationListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter IntegrationAccountRosettaNetProcessConfigurationListResultIterator) Value() IntegrationAccountRosettaNetProcessConfiguration {
+ if !iter.page.NotDone() {
+ return IntegrationAccountRosettaNetProcessConfiguration{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the IntegrationAccountRosettaNetProcessConfigurationListResultIterator type.
+func NewIntegrationAccountRosettaNetProcessConfigurationListResultIterator(page IntegrationAccountRosettaNetProcessConfigurationListResultPage) IntegrationAccountRosettaNetProcessConfigurationListResultIterator {
+ return IntegrationAccountRosettaNetProcessConfigurationListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (iarnpclr IntegrationAccountRosettaNetProcessConfigurationListResult) IsEmpty() bool {
+ return iarnpclr.Value == nil || len(*iarnpclr.Value) == 0
+}
+
+// integrationAccountRosettaNetProcessConfigurationListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (iarnpclr IntegrationAccountRosettaNetProcessConfigurationListResult) integrationAccountRosettaNetProcessConfigurationListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if iarnpclr.NextLink == nil || len(to.String(iarnpclr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(iarnpclr.NextLink)))
+}
+
+// IntegrationAccountRosettaNetProcessConfigurationListResultPage contains a page of
+// IntegrationAccountRosettaNetProcessConfiguration values.
+type IntegrationAccountRosettaNetProcessConfigurationListResultPage struct {
+ fn func(context.Context, IntegrationAccountRosettaNetProcessConfigurationListResult) (IntegrationAccountRosettaNetProcessConfigurationListResult, error)
+ iarnpclr IntegrationAccountRosettaNetProcessConfigurationListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *IntegrationAccountRosettaNetProcessConfigurationListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IntegrationAccountRosettaNetProcessConfigurationListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.iarnpclr)
+ if err != nil {
+ return err
+ }
+ page.iarnpclr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *IntegrationAccountRosettaNetProcessConfigurationListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page IntegrationAccountRosettaNetProcessConfigurationListResultPage) NotDone() bool {
+ return !page.iarnpclr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page IntegrationAccountRosettaNetProcessConfigurationListResultPage) Response() IntegrationAccountRosettaNetProcessConfigurationListResult {
+ return page.iarnpclr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page IntegrationAccountRosettaNetProcessConfigurationListResultPage) Values() []IntegrationAccountRosettaNetProcessConfiguration {
+ if page.iarnpclr.IsEmpty() {
+ return nil
+ }
+ return *page.iarnpclr.Value
+}
+
+// Creates a new instance of the IntegrationAccountRosettaNetProcessConfigurationListResultPage type.
+func NewIntegrationAccountRosettaNetProcessConfigurationListResultPage(getNextPage func(context.Context, IntegrationAccountRosettaNetProcessConfigurationListResult) (IntegrationAccountRosettaNetProcessConfigurationListResult, error)) IntegrationAccountRosettaNetProcessConfigurationListResultPage {
+ return IntegrationAccountRosettaNetProcessConfigurationListResultPage{fn: getNextPage}
+}
+
+// IntegrationAccountRosettaNetProcessConfigurationProperties the integration account RosettaNet process
+// configuration properties.
+type IntegrationAccountRosettaNetProcessConfigurationProperties struct {
+ // CreatedTime - READ-ONLY; The created time.
+ CreatedTime *date.Time `json:"createdTime,omitempty"`
+ // ChangedTime - READ-ONLY; The changed time.
+ ChangedTime *date.Time `json:"changedTime,omitempty"`
+ // Metadata - The metadata.
+ Metadata map[string]*string `json:"metadata"`
+ // ActivitySettings - The RosettaNet process configuration activity settings.
+ ActivitySettings *RosettaNetPipActivitySettings `json:"activitySettings,omitempty"`
+ // ProcessCode - The integration account RosettaNet process code.
+ ProcessCode *string `json:"processCode,omitempty"`
+ // ProcessVersion - The integration account RosettaNet process version.
+ ProcessVersion *string `json:"processVersion,omitempty"`
+ // ProcessName - The integration account RosettaNet process name.
+ ProcessName *string `json:"processName,omitempty"`
+ // InitiatorRoleSettings - The RosettaNet initiator role settings.
+ InitiatorRoleSettings *RosettaNetPipRoleSettings `json:"initiatorRoleSettings,omitempty"`
+ // ResponderRoleSettings - The RosettaNet responder role settings.
+ ResponderRoleSettings *RosettaNetPipRoleSettings `json:"responderRoleSettings,omitempty"`
+ // Description - The integration account RosettaNet ProcessConfiguration description.
+ Description *string `json:"description,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for IntegrationAccountRosettaNetProcessConfigurationProperties.
+func (iarnpcp IntegrationAccountRosettaNetProcessConfigurationProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if iarnpcp.Metadata != nil {
+ objectMap["metadata"] = iarnpcp.Metadata
+ }
+ if iarnpcp.ActivitySettings != nil {
+ objectMap["activitySettings"] = iarnpcp.ActivitySettings
+ }
+ if iarnpcp.ProcessCode != nil {
+ objectMap["processCode"] = iarnpcp.ProcessCode
+ }
+ if iarnpcp.ProcessVersion != nil {
+ objectMap["processVersion"] = iarnpcp.ProcessVersion
+ }
+ if iarnpcp.ProcessName != nil {
+ objectMap["processName"] = iarnpcp.ProcessName
+ }
+ if iarnpcp.InitiatorRoleSettings != nil {
+ objectMap["initiatorRoleSettings"] = iarnpcp.InitiatorRoleSettings
+ }
+ if iarnpcp.ResponderRoleSettings != nil {
+ objectMap["responderRoleSettings"] = iarnpcp.ResponderRoleSettings
+ }
+ if iarnpcp.Description != nil {
+ objectMap["description"] = iarnpcp.Description
+ }
+ return json.Marshal(objectMap)
+}
+
// IntegrationAccountSchema the integration account schema.
type IntegrationAccountSchema struct {
autorest.Response `json:"-"`
@@ -3939,6 +4361,73 @@ type RetryHistory struct {
Error *ErrorResponse `json:"error,omitempty"`
}
+// RosettaNetPipAcknowledgmentOfReceiptSettings the integration account RosettaNet ProcessConfiguration
+// Acknowledgement settings.
+type RosettaNetPipAcknowledgmentOfReceiptSettings struct {
+ // IsNonRepudiationRequired - The value indicating whether non-repudiation is required.
+ IsNonRepudiationRequired *bool `json:"isNonRepudiationRequired,omitempty"`
+ // TimeToAcknowledgeInSeconds - The time to acknowledge in seconds.
+ TimeToAcknowledgeInSeconds *int32 `json:"timeToAcknowledgeInSeconds,omitempty"`
+}
+
+// RosettaNetPipActivityBehavior the integration account RosettaNet ProcessConfiguration activity behavior.
+type RosettaNetPipActivityBehavior struct {
+ // IsAuthorizationRequired - The value indicating whether authorization is required.
+ IsAuthorizationRequired *bool `json:"isAuthorizationRequired,omitempty"`
+ // PersistentConfidentialityScope - The persistent confidentiality encryption scope. Possible values include: 'RosettaNetPipConfidentialityScopeNotSpecified', 'RosettaNetPipConfidentialityScopeNone', 'RosettaNetPipConfidentialityScopePayload', 'RosettaNetPipConfidentialityScopePayloadContainer'
+ PersistentConfidentialityScope RosettaNetPipConfidentialityScope `json:"persistentConfidentialityScope,omitempty"`
+ // IsSecuredTransportRequired - The value indicating whether secured transport is required.
+ IsSecuredTransportRequired *bool `json:"isSecuredTransportRequired,omitempty"`
+ // ActionType - The value indicating whether the RosettaNet PIP is used for a single action. Possible values include: 'RosettaNetActionTypeNotSpecified', 'RosettaNetActionTypeSingleAction', 'RosettaNetActionTypeDoubleAction'
+ ActionType RosettaNetActionType `json:"actionType,omitempty"`
+ // ResponseType - The value indicating whether the RosettaNet PIP communication is synchronous. Possible values include: 'RosettaNetResponseTypeNotSpecified', 'RosettaNetResponseTypeSync', 'RosettaNetResponseTypeAsync'
+ ResponseType RosettaNetResponseType `json:"responseType,omitempty"`
+ // NonRepudiationOfOriginAndContent - The value indicating whether non-repudiation is for origin and content.
+ NonRepudiationOfOriginAndContent *bool `json:"nonRepudiationOfOriginAndContent,omitempty"`
+ // RetryCount - The number of retry attempts.
+ RetryCount *int32 `json:"retryCount,omitempty"`
+ // TimeToPerformInSeconds - The time to perform in seconds.
+ TimeToPerformInSeconds *int32 `json:"timeToPerformInSeconds,omitempty"`
+}
+
+// RosettaNetPipActivitySettings the integration account RosettaNet ProcessConfiguration activity settings.
+type RosettaNetPipActivitySettings struct {
+ // ActivityType - The RosettaNet ProcessConfiguration activity type. Possible values include: 'RosettaNetPipActivityTypeNotSpecified', 'RosettaNetPipActivityTypeInformationDistribution', 'RosettaNetPipActivityTypeBusinessTransaction', 'RosettaNetPipActivityTypeNotification', 'RosettaNetPipActivityTypeQueryResponse', 'RosettaNetPipActivityTypeRequestConfirm', 'RosettaNetPipActivityTypeRequestResponse'
+ ActivityType RosettaNetPipActivityType `json:"activityType,omitempty"`
+ // ActivityBehavior - The RosettaNet ProcessConfiguration activity behavior.
+ ActivityBehavior *RosettaNetPipActivityBehavior `json:"activityBehavior,omitempty"`
+ // AcknowledgmentOfReceiptSettings - The RosettaNet ProcessConfiguration acknowledgement settings.
+ AcknowledgmentOfReceiptSettings *RosettaNetPipAcknowledgmentOfReceiptSettings `json:"acknowledgmentOfReceiptSettings,omitempty"`
+}
+
+// RosettaNetPipBusinessDocument the RosettaNet ProcessConfiguration business document settings.
+type RosettaNetPipBusinessDocument struct {
+ // Name - The business document name.
+ Name *string `json:"name,omitempty"`
+ // Version - The business document version.
+ Version *string `json:"version,omitempty"`
+ // Description - The business document description.
+ Description *string `json:"description,omitempty"`
+}
+
+// RosettaNetPipRoleSettings the integration account RosettaNet ProcessConfiguration role settings.
+type RosettaNetPipRoleSettings struct {
+ // Action - The action name.
+ Action *string `json:"action,omitempty"`
+ // Role - The role name.
+ Role *string `json:"role,omitempty"`
+ // RoleType - The RosettaNet ProcessConfiguration role type. Possible values include: 'RosettaNetPipRoleTypeNotSpecified', 'RosettaNetPipRoleTypeFunctional', 'RosettaNetPipRoleTypeOrganizational', 'RosettaNetPipRoleTypeEmployee'
+ RoleType RosettaNetPipRoleType `json:"roleType,omitempty"`
+ // Service - The service name.
+ Service *string `json:"service,omitempty"`
+ // ServiceClassification - The service classification name.
+ ServiceClassification *string `json:"serviceClassification,omitempty"`
+ // BusinessDocument - The RosettaNet ProcessConfiguration business document.
+ BusinessDocument *RosettaNetPipBusinessDocument `json:"businessDocument,omitempty"`
+ // Description - The description.
+ Description *string `json:"description,omitempty"`
+}
+
// RunActionCorrelation the workflow run action correlation properties.
type RunActionCorrelation struct {
// ActionTrackingID - The action tracking identifier.
@@ -4303,7 +4792,7 @@ type WorkflowProperties struct {
Sku *Sku `json:"sku,omitempty"`
// IntegrationAccount - The integration account.
IntegrationAccount *ResourceReference `json:"integrationAccount,omitempty"`
- // Definition - The definition.
+ // Definition - The definition. See [Schema reference for Workflow Definition Language in Azure Logic Apps](https://aka.ms/logic-apps-workflow-definition-language).
Definition interface{} `json:"definition,omitempty"`
// Parameters - The parameters.
Parameters map[string]*WorkflowParameter `json:"parameters"`
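To make the new RosettaNet model types concrete, here is a sketch of an `IntegrationAccountRosettaNetProcessConfiguration` populated with every field the `CreateOrUpdate` validation (see the new client file below) marks as required. All literal values (the `3A4` PIP code, role and document names, timeouts) are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/logic/mgmt/2016-06-01/logic"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	initiator := &logic.RosettaNetPipRoleSettings{
		Action:                to.StringPtr("Purchase Order Request Action"),
		Role:                  to.StringPtr("Buyer"),
		RoleType:              logic.RosettaNetPipRoleTypeFunctional,
		Service:               to.StringPtr("Buyer Service"),
		ServiceClassification: to.StringPtr("Business Service"),
		BusinessDocument: &logic.RosettaNetPipBusinessDocument{
			Name:    to.StringPtr("Purchase Order Request"),
			Version: to.StringPtr("V02.02.00"),
		},
	}
	// Reused here for brevity; a real responder role has its own
	// action/role/service/document values.
	responder := initiator

	config := logic.IntegrationAccountRosettaNetProcessConfiguration{
		Location: to.StringPtr("westus"),
		IntegrationAccountRosettaNetProcessConfigurationProperties: &logic.IntegrationAccountRosettaNetProcessConfigurationProperties{
			ProcessCode:    to.StringPtr("3A4"),
			ProcessVersion: to.StringPtr("V02.02.00"),
			ProcessName:    to.StringPtr("Request Purchase Order"),
			ActivitySettings: &logic.RosettaNetPipActivitySettings{
				ActivityType: logic.RosettaNetPipActivityTypeRequestResponse,
				ActivityBehavior: &logic.RosettaNetPipActivityBehavior{
					IsAuthorizationRequired:          to.BoolPtr(false),
					IsSecuredTransportRequired:       to.BoolPtr(true),
					NonRepudiationOfOriginAndContent: to.BoolPtr(false),
					ActionType:                       logic.RosettaNetActionTypeDoubleAction,
					ResponseType:                     logic.RosettaNetResponseTypeAsync,
					RetryCount:                       to.Int32Ptr(2),
					TimeToPerformInSeconds:           to.Int32Ptr(7200),
				},
				AcknowledgmentOfReceiptSettings: &logic.RosettaNetPipAcknowledgmentOfReceiptSettings{
					IsNonRepudiationRequired:   to.BoolPtr(false),
					TimeToAcknowledgeInSeconds: to.Int32Ptr(600),
				},
			},
			InitiatorRoleSettings: initiator,
			ResponderRoleSettings: responder,
		},
	}
	fmt.Println(*config.ProcessCode)
}
```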
diff --git a/services/logic/mgmt/2016-06-01/logic/rosettanetprocessconfigurations.go b/services/logic/mgmt/2016-06-01/logic/rosettanetprocessconfigurations.go
new file mode 100644
index 000000000000..c249ea55c4bb
--- /dev/null
+++ b/services/logic/mgmt/2016-06-01/logic/rosettanetprocessconfigurations.go
@@ -0,0 +1,448 @@
+package logic
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// RosettaNetProcessConfigurationsClient is the REST API for Azure Logic Apps.
+type RosettaNetProcessConfigurationsClient struct {
+ BaseClient
+}
+
+// NewRosettaNetProcessConfigurationsClient creates an instance of the RosettaNetProcessConfigurationsClient client.
+func NewRosettaNetProcessConfigurationsClient(subscriptionID string) RosettaNetProcessConfigurationsClient {
+ return NewRosettaNetProcessConfigurationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewRosettaNetProcessConfigurationsClientWithBaseURI creates an instance of the RosettaNetProcessConfigurationsClient
+// client.
+func NewRosettaNetProcessConfigurationsClientWithBaseURI(baseURI string, subscriptionID string) RosettaNetProcessConfigurationsClient {
+ return RosettaNetProcessConfigurationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates an integration account RosettaNetProcessConfiguration.
+// Parameters:
+// resourceGroupName - the resource group name.
+// integrationAccountName - the integration account name.
+// rosettaNetProcessConfigurationName - the integration account RosettaNet ProcessConfiguration name.
+// rosettaNetProcessConfiguration - the integration account RosettaNet ProcessConfiguration.
+func (client RosettaNetProcessConfigurationsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, integrationAccountName string, rosettaNetProcessConfigurationName string, rosettaNetProcessConfiguration IntegrationAccountRosettaNetProcessConfiguration) (result IntegrationAccountRosettaNetProcessConfiguration, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RosettaNetProcessConfigurationsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: rosettaNetProcessConfiguration,
+ Constraints: []validation.Constraint{{Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ActivitySettings", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ActivitySettings.ActivityBehavior", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ActivitySettings.ActivityBehavior.IsAuthorizationRequired", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ActivitySettings.ActivityBehavior.IsSecuredTransportRequired", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ActivitySettings.ActivityBehavior.NonRepudiationOfOriginAndContent", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ActivitySettings.ActivityBehavior.RetryCount", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ActivitySettings.ActivityBehavior.TimeToPerformInSeconds", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ActivitySettings.AcknowledgmentOfReceiptSettings", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ActivitySettings.AcknowledgmentOfReceiptSettings.IsNonRepudiationRequired", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ActivitySettings.AcknowledgmentOfReceiptSettings.TimeToAcknowledgeInSeconds", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ProcessCode", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ProcessVersion", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ProcessName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.InitiatorRoleSettings", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.InitiatorRoleSettings.Action", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.InitiatorRoleSettings.Role", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.InitiatorRoleSettings.Service", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.InitiatorRoleSettings.ServiceClassification", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.InitiatorRoleSettings.BusinessDocument", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.InitiatorRoleSettings.BusinessDocument.Name", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.InitiatorRoleSettings.BusinessDocument.Version", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ResponderRoleSettings", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ResponderRoleSettings.Action", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ResponderRoleSettings.Role", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ResponderRoleSettings.Service", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ResponderRoleSettings.ServiceClassification", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ResponderRoleSettings.BusinessDocument", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ResponderRoleSettings.BusinessDocument.Name", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "rosettaNetProcessConfiguration.IntegrationAccountRosettaNetProcessConfigurationProperties.ResponderRoleSettings.BusinessDocument.Version", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("logic.RosettaNetProcessConfigurationsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, integrationAccountName, rosettaNetProcessConfigurationName, rosettaNetProcessConfiguration)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client RosettaNetProcessConfigurationsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, integrationAccountName string, rosettaNetProcessConfigurationName string, rosettaNetProcessConfiguration IntegrationAccountRosettaNetProcessConfiguration) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "integrationAccountName": autorest.Encode("path", integrationAccountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "rosettaNetProcessConfigurationName": autorest.Encode("path", rosettaNetProcessConfigurationName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2016-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/rosettanetprocessconfigurations/{rosettaNetProcessConfigurationName}", pathParameters),
+ autorest.WithJSON(rosettaNetProcessConfiguration),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client RosettaNetProcessConfigurationsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client RosettaNetProcessConfigurationsClient) CreateOrUpdateResponder(resp *http.Response) (result IntegrationAccountRosettaNetProcessConfiguration, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes an integration account RosettaNet ProcessConfiguration.
+// Parameters:
+// resourceGroupName - the resource group name.
+// integrationAccountName - the integration account name.
+// rosettaNetProcessConfigurationName - the integration account RosettaNetProcessConfiguration name.
+func (client RosettaNetProcessConfigurationsClient) Delete(ctx context.Context, resourceGroupName string, integrationAccountName string, rosettaNetProcessConfigurationName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RosettaNetProcessConfigurationsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, integrationAccountName, rosettaNetProcessConfigurationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client RosettaNetProcessConfigurationsClient) DeletePreparer(ctx context.Context, resourceGroupName string, integrationAccountName string, rosettaNetProcessConfigurationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "integrationAccountName": autorest.Encode("path", integrationAccountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "rosettaNetProcessConfigurationName": autorest.Encode("path", rosettaNetProcessConfigurationName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2016-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/rosettanetprocessconfigurations/{rosettaNetProcessConfigurationName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client RosettaNetProcessConfigurationsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client RosettaNetProcessConfigurationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets an integration account RosettaNetProcessConfiguration.
+// Parameters:
+// resourceGroupName - the resource group name.
+// integrationAccountName - the integration account name.
+// rosettaNetProcessConfigurationName - the integration account RosettaNetProcessConfiguration name.
+func (client RosettaNetProcessConfigurationsClient) Get(ctx context.Context, resourceGroupName string, integrationAccountName string, rosettaNetProcessConfigurationName string) (result IntegrationAccountRosettaNetProcessConfiguration, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RosettaNetProcessConfigurationsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, integrationAccountName, rosettaNetProcessConfigurationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client RosettaNetProcessConfigurationsClient) GetPreparer(ctx context.Context, resourceGroupName string, integrationAccountName string, rosettaNetProcessConfigurationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "integrationAccountName": autorest.Encode("path", integrationAccountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "rosettaNetProcessConfigurationName": autorest.Encode("path", rosettaNetProcessConfigurationName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2016-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/rosettanetprocessconfigurations/{rosettaNetProcessConfigurationName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client RosettaNetProcessConfigurationsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client RosettaNetProcessConfigurationsClient) GetResponder(resp *http.Response) (result IntegrationAccountRosettaNetProcessConfiguration, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByIntegrationAccounts gets a list of integration account RosettaNet process configurations.
+// Parameters:
+// resourceGroupName - the resource group name.
+// integrationAccountName - the integration account name.
+// top - the number of items to be included in the result.
+// filter - the filter to apply on the operation.
+func (client RosettaNetProcessConfigurationsClient) ListByIntegrationAccounts(ctx context.Context, resourceGroupName string, integrationAccountName string, top *int32, filter string) (result IntegrationAccountRosettaNetProcessConfigurationListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RosettaNetProcessConfigurationsClient.ListByIntegrationAccounts")
+ defer func() {
+ sc := -1
+ if result.iarnpclr.Response.Response != nil {
+ sc = result.iarnpclr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByIntegrationAccountsNextResults
+ req, err := client.ListByIntegrationAccountsPreparer(ctx, resourceGroupName, integrationAccountName, top, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "ListByIntegrationAccounts", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByIntegrationAccountsSender(req)
+ if err != nil {
+ result.iarnpclr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "ListByIntegrationAccounts", resp, "Failure sending request")
+ return
+ }
+
+ result.iarnpclr, err = client.ListByIntegrationAccountsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "ListByIntegrationAccounts", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByIntegrationAccountsPreparer prepares the ListByIntegrationAccounts request.
+func (client RosettaNetProcessConfigurationsClient) ListByIntegrationAccountsPreparer(ctx context.Context, resourceGroupName string, integrationAccountName string, top *int32, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "integrationAccountName": autorest.Encode("path", integrationAccountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2016-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/rosettanetprocessconfigurations", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByIntegrationAccountsSender sends the ListByIntegrationAccounts request. The method will close the
+// http.Response Body if it receives an error.
+func (client RosettaNetProcessConfigurationsClient) ListByIntegrationAccountsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByIntegrationAccountsResponder handles the response to the ListByIntegrationAccounts request. The method always
+// closes the http.Response Body.
+func (client RosettaNetProcessConfigurationsClient) ListByIntegrationAccountsResponder(resp *http.Response) (result IntegrationAccountRosettaNetProcessConfigurationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByIntegrationAccountsNextResults retrieves the next set of results, if any.
+func (client RosettaNetProcessConfigurationsClient) listByIntegrationAccountsNextResults(ctx context.Context, lastResults IntegrationAccountRosettaNetProcessConfigurationListResult) (result IntegrationAccountRosettaNetProcessConfigurationListResult, err error) {
+ req, err := lastResults.integrationAccountRosettaNetProcessConfigurationListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "listByIntegrationAccountsNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByIntegrationAccountsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "listByIntegrationAccountsNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByIntegrationAccountsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.RosettaNetProcessConfigurationsClient", "listByIntegrationAccountsNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByIntegrationAccountsComplete enumerates all values, automatically crossing page boundaries as required.
+func (client RosettaNetProcessConfigurationsClient) ListByIntegrationAccountsComplete(ctx context.Context, resourceGroupName string, integrationAccountName string, top *int32, filter string) (result IntegrationAccountRosettaNetProcessConfigurationListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RosettaNetProcessConfigurationsClient.ListByIntegrationAccounts")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByIntegrationAccounts(ctx, resourceGroupName, integrationAccountName, top, filter)
+ return
+}
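A minimal consumption sketch for the pager added above, assuming the client already carries a valid Authorizer; the element's Name field is taken from the usual generated ARM resource envelope rather than from this hunk.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/logic/mgmt/2016-06-01/logic"
)

// listRosettaNetConfigs walks every result page via the Complete iterator.
func listRosettaNetConfigs(ctx context.Context, client logic.RosettaNetProcessConfigurationsClient) error {
	iter, err := client.ListByIntegrationAccountsComplete(ctx, "myResourceGroup", "myIntegrationAccount", nil, "")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		cfg := iter.Value() // one RosettaNet process configuration per iteration
		if cfg.Name != nil { // Name assumed from the ARM resource envelope
			fmt.Println(*cfg.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}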
diff --git a/services/logic/mgmt/2016-06-01/logic/workflowruns.go b/services/logic/mgmt/2016-06-01/logic/workflowruns.go
index d40c2b4ed1f5..5049bb1e43e2 100644
--- a/services/logic/mgmt/2016-06-01/logic/workflowruns.go
+++ b/services/logic/mgmt/2016-06-01/logic/workflowruns.go
@@ -118,6 +118,84 @@ func (client WorkflowRunsClient) CancelResponder(resp *http.Response) (result au
return
}
+// Delete deletes a workflow run.
+// Parameters:
+// resourceGroupName - the resource group name.
+// workflowName - the workflow name.
+// runName - the workflow run name.
+func (client WorkflowRunsClient) Delete(ctx context.Context, resourceGroupName string, workflowName string, runName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkflowRunsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, workflowName, runName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.WorkflowRunsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "logic.WorkflowRunsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.WorkflowRunsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client WorkflowRunsClient) DeletePreparer(ctx context.Context, resourceGroupName string, workflowName string, runName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "runName": autorest.Encode("path", runName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workflowName": autorest.Encode("path", workflowName),
+ }
+
+ const APIVersion = "2016-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkflowRunsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client WorkflowRunsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
// Get gets a workflow run.
// Parameters:
// resourceGroupName - the resource group name.
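A hedged usage sketch for the new WorkflowRuns Delete operation; the resource names are placeholders and only the call shape shown above is relied on.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/logic/mgmt/2016-06-01/logic"
)

// deleteWorkflowRun removes a single run; both 200 OK and 204 No Content count as success.
func deleteWorkflowRun(ctx context.Context, client logic.WorkflowRunsClient) error {
	_, err := client.Delete(ctx, "myResourceGroup", "myWorkflow", "myRunName")
	return err
}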
diff --git a/services/machinelearning/mgmt/2016-04-01/workspaces/workspaces.go b/services/machinelearning/mgmt/2016-04-01/workspaces/workspaces.go
index 6758465b04b2..a641c9b769d3 100644
--- a/services/machinelearning/mgmt/2016-04-01/workspaces/workspaces.go
+++ b/services/machinelearning/mgmt/2016-04-01/workspaces/workspaces.go
@@ -61,7 +61,9 @@ func (client Client) CreateOrUpdate(ctx context.Context, resourceGroupName strin
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "parameters.Properties.UserStorageAccountID", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ Chain: []validation.Constraint{{Target: "parameters.Properties.UserStorageAccountID", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.Properties.OwnerEmail", Name: validation.Null, Rule: true, Chain: nil},
+ }}}}}); err != nil {
return result, validation.NewError("workspaces.Client", "CreateOrUpdate", err.Error())
}
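The change above adds OwnerEmail to the client-side validation chain, so CreateOrUpdate now fails fast when it is nil. A sketch under the assumption that the workspace property struct exposes UserStorageAccountID and OwnerEmail as *string fields; only the two validated field names come from the diff, and the struct name is assumed.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/machinelearning/mgmt/2016-04-01/workspaces"
	"github.com/Azure/go-autorest/autorest/to"
)

func createWorkspace(ctx context.Context, client workspaces.Client) error {
	params := workspaces.Workspace{
		Properties: &workspaces.WorkspaceProperties{ // struct name assumed, not shown in this hunk
			UserStorageAccountID: to.StringPtr("<storage account resource id>"),
			OwnerEmail:           to.StringPtr("owner@example.com"), // nil now fails client-side validation
		},
	}
	_, err := client.CreateOrUpdate(ctx, "myResourceGroup", "myWorkspace", params)
	return err
}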
diff --git a/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/client.go b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/client.go
new file mode 100644
index 000000000000..ad8ad4f10004
--- /dev/null
+++ b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/client.go
@@ -0,0 +1,51 @@
+// Package machinelearningservices implements the Azure ARM Machinelearningservices service API version 2019-06-01.
+//
+// These APIs allow end users to operate on Azure Machine Learning Workspace resources.
+package machinelearningservices
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Machinelearningservices
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Machinelearningservices.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
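Construction follows the standard generated pattern: create the client, then attach an authorizer before calling any operation. A sketch assuming credentials come from the environment via go-autorest's auth helper package.

package example

import (
	"github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

// newBaseClient wires credentials onto the generated client; every operation client in this
// package embeds BaseClient, so the same pattern applies to them.
func newBaseClient(subscriptionID string) (machinelearningservices.BaseClient, error) {
	client := machinelearningservices.New(subscriptionID)
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		return machinelearningservices.BaseClient{}, err
	}
	client.Authorizer = authorizer
	return client, nil
}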
diff --git a/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/machinelearningcompute.go b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/machinelearningcompute.go
new file mode 100644
index 000000000000..4964a1c41d68
--- /dev/null
+++ b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/machinelearningcompute.go
@@ -0,0 +1,646 @@
+package machinelearningservices
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// MachineLearningComputeClient is the client for the APIs that allow end users to operate on Azure Machine Learning
+// Workspace resources.
+type MachineLearningComputeClient struct {
+ BaseClient
+}
+
+// NewMachineLearningComputeClient creates an instance of the MachineLearningComputeClient client.
+func NewMachineLearningComputeClient(subscriptionID string) MachineLearningComputeClient {
+ return NewMachineLearningComputeClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewMachineLearningComputeClientWithBaseURI creates an instance of the MachineLearningComputeClient client.
+func NewMachineLearningComputeClientWithBaseURI(baseURI string, subscriptionID string) MachineLearningComputeClient {
+ return MachineLearningComputeClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates compute. This call will overwrite a compute if it exists. This is a nonrecoverable
+// operation. If your intent is to create a new compute, do a GET first to verify that it does not exist yet.
+// Parameters:
+// resourceGroupName - name of the resource group in which workspace is located.
+// workspaceName - name of Azure Machine Learning workspace.
+// computeName - name of the Azure Machine Learning compute.
+// parameters - payload with Machine Learning compute definition.
+func (client MachineLearningComputeClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, parameters ComputeResource) (result MachineLearningComputeCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MachineLearningComputeClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, computeName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client MachineLearningComputeClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, parameters ComputeResource) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "computeName": autorest.Encode("path", computeName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client MachineLearningComputeClient) CreateOrUpdateSender(req *http.Request) (future MachineLearningComputeCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client MachineLearningComputeClient) CreateOrUpdateResponder(resp *http.Response) (result ComputeResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes specified Machine Learning compute.
+// Parameters:
+// resourceGroupName - name of the resource group in which workspace is located.
+// workspaceName - name of Azure Machine Learning workspace.
+// computeName - name of the Azure Machine Learning compute.
+// underlyingResourceAction - delete the underlying compute if 'Delete', or detach the underlying compute from
+// the workspace if 'Detach'.
+func (client MachineLearningComputeClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, underlyingResourceAction UnderlyingResourceAction) (result MachineLearningComputeDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MachineLearningComputeClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, workspaceName, computeName, underlyingResourceAction)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client MachineLearningComputeClient) DeletePreparer(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, underlyingResourceAction UnderlyingResourceAction) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "computeName": autorest.Encode("path", computeName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ "underlyingResourceAction": autorest.Encode("query", underlyingResourceAction),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client MachineLearningComputeClient) DeleteSender(req *http.Request) (future MachineLearningComputeDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client MachineLearningComputeClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the compute definition by its name. Any secrets (storage keys, service credentials, etc.) are not
+// returned; use the 'keys' nested resource to get them.
+// Parameters:
+// resourceGroupName - name of the resource group in which workspace is located.
+// workspaceName - name of Azure Machine Learning workspace.
+// computeName - name of the Azure Machine Learning compute.
+func (client MachineLearningComputeClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, computeName string) (result ComputeResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MachineLearningComputeClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, computeName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client MachineLearningComputeClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, computeName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "computeName": autorest.Encode("path", computeName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client MachineLearningComputeClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client MachineLearningComputeClient) GetResponder(resp *http.Response) (result ComputeResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByWorkspace gets computes in specified workspace.
+// Parameters:
+// resourceGroupName - name of the resource group in which workspace is located.
+// workspaceName - name of Azure Machine Learning workspace.
+// skiptoken - continuation token for pagination.
+func (client MachineLearningComputeClient) ListByWorkspace(ctx context.Context, resourceGroupName string, workspaceName string, skiptoken string) (result PaginatedComputeResourcesListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MachineLearningComputeClient.ListByWorkspace")
+ defer func() {
+ sc := -1
+ if result.pcrl.Response.Response != nil {
+ sc = result.pcrl.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByWorkspaceNextResults
+ req, err := client.ListByWorkspacePreparer(ctx, resourceGroupName, workspaceName, skiptoken)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "ListByWorkspace", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByWorkspaceSender(req)
+ if err != nil {
+ result.pcrl.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "ListByWorkspace", resp, "Failure sending request")
+ return
+ }
+
+ result.pcrl, err = client.ListByWorkspaceResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "ListByWorkspace", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByWorkspacePreparer prepares the ListByWorkspace request.
+func (client MachineLearningComputeClient) ListByWorkspacePreparer(ctx context.Context, resourceGroupName string, workspaceName string, skiptoken string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(skiptoken) > 0 {
+ queryParameters["$skiptoken"] = autorest.Encode("query", skiptoken)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByWorkspaceSender sends the ListByWorkspace request. The method will close the
+// http.Response Body if it receives an error.
+func (client MachineLearningComputeClient) ListByWorkspaceSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByWorkspaceResponder handles the response to the ListByWorkspace request. The method always
+// closes the http.Response Body.
+func (client MachineLearningComputeClient) ListByWorkspaceResponder(resp *http.Response) (result PaginatedComputeResourcesList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByWorkspaceNextResults retrieves the next set of results, if any.
+func (client MachineLearningComputeClient) listByWorkspaceNextResults(ctx context.Context, lastResults PaginatedComputeResourcesList) (result PaginatedComputeResourcesList, err error) {
+ req, err := lastResults.paginatedComputeResourcesListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "listByWorkspaceNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByWorkspaceSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "listByWorkspaceNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByWorkspaceResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "listByWorkspaceNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByWorkspaceComplete enumerates all values, automatically crossing page boundaries as required.
+func (client MachineLearningComputeClient) ListByWorkspaceComplete(ctx context.Context, resourceGroupName string, workspaceName string, skiptoken string) (result PaginatedComputeResourcesListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MachineLearningComputeClient.ListByWorkspace")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByWorkspace(ctx, resourceGroupName, workspaceName, skiptoken)
+ return
+}
+
+// ListKeys gets the secrets related to a Machine Learning compute (storage keys, service credentials, etc.).
+// Parameters:
+// resourceGroupName - name of the resource group in which workspace is located.
+// workspaceName - name of Azure Machine Learning workspace.
+// computeName - name of the Azure Machine Learning compute.
+func (client MachineLearningComputeClient) ListKeys(ctx context.Context, resourceGroupName string, workspaceName string, computeName string) (result ComputeSecretsModel, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MachineLearningComputeClient.ListKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListKeysPreparer(ctx, resourceGroupName, workspaceName, computeName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "ListKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListKeysSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "ListKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "ListKeys", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListKeysPreparer prepares the ListKeys request.
+func (client MachineLearningComputeClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, workspaceName string, computeName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "computeName": autorest.Encode("path", computeName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/listKeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListKeysSender sends the ListKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client MachineLearningComputeClient) ListKeysSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListKeysResponder handles the response to the ListKeys request. The method always
+// closes the http.Response Body.
+func (client MachineLearningComputeClient) ListKeysResponder(resp *http.Response) (result ComputeSecretsModel, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListNodes gets the details (e.g. IP address, port, etc.) of all the compute nodes in the compute.
+// Parameters:
+// resourceGroupName - name of the resource group in which workspace is located.
+// workspaceName - name of Azure Machine Learning workspace.
+// computeName - name of the Azure Machine Learning compute.
+func (client MachineLearningComputeClient) ListNodes(ctx context.Context, resourceGroupName string, workspaceName string, computeName string) (result AmlComputeNodesInformation, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MachineLearningComputeClient.ListNodes")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListNodesPreparer(ctx, resourceGroupName, workspaceName, computeName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "ListNodes", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListNodesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "ListNodes", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListNodesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "ListNodes", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListNodesPreparer prepares the ListNodes request.
+func (client MachineLearningComputeClient) ListNodesPreparer(ctx context.Context, resourceGroupName string, workspaceName string, computeName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "computeName": autorest.Encode("path", computeName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}/listNodes", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListNodesSender sends the ListNodes request. The method will close the
+// http.Response Body if it receives an error.
+func (client MachineLearningComputeClient) ListNodesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListNodesResponder handles the response to the ListNodes request. The method always
+// closes the http.Response Body.
+func (client MachineLearningComputeClient) ListNodesResponder(resp *http.Response) (result AmlComputeNodesInformation, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Update updates properties of a compute. This call will overwrite a compute if it exists. This is a nonrecoverable
+// operation.
+// Parameters:
+// resourceGroupName - name of the resource group in which workspace is located.
+// workspaceName - name of Azure Machine Learning workspace.
+// computeName - name of the Azure Machine Learning compute.
+// parameters - additional parameters for cluster update.
+func (client MachineLearningComputeClient) Update(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, parameters ClusterUpdateParameters) (result MachineLearningComputeUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MachineLearningComputeClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, workspaceName, computeName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.UpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeClient", "Update", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client MachineLearningComputeClient) UpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, parameters ClusterUpdateParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "computeName": autorest.Encode("path", computeName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/computes/{computeName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client MachineLearningComputeClient) UpdateSender(req *http.Request) (future MachineLearningComputeUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client MachineLearningComputeClient) UpdateResponder(resp *http.Response) (result ComputeResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
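A hedged sketch of driving the long-running Delete defined above; WaitForCompletionRef is promoted from the embedded azure.Future, and the resource names are placeholders.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices"
)

// detachCompute detaches (rather than deletes) the underlying compute and blocks until the
// long-running operation reaches a terminal state.
func detachCompute(ctx context.Context, client machinelearningservices.MachineLearningComputeClient) error {
	future, err := client.Delete(ctx, "myResourceGroup", "myWorkspace", "myCompute", machinelearningservices.Detach)
	if err != nil {
		return err
	}
	return future.WaitForCompletionRef(ctx, client.Client)
}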
diff --git a/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/machinelearningservicesapi/interfaces.go b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/machinelearningservicesapi/interfaces.go
new file mode 100644
index 000000000000..6eee7c0c701c
--- /dev/null
+++ b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/machinelearningservicesapi/interfaces.go
@@ -0,0 +1,72 @@
+package machinelearningservicesapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices"
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// OperationsClientAPI contains the set of methods on the OperationsClient type.
+type OperationsClientAPI interface {
+ List(ctx context.Context) (result machinelearningservices.OperationListResult, err error)
+}
+
+var _ OperationsClientAPI = (*machinelearningservices.OperationsClient)(nil)
+
+// WorkspacesClientAPI contains the set of methods on the WorkspacesClient type.
+type WorkspacesClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, parameters machinelearningservices.Workspace) (result machinelearningservices.Workspace, err error)
+ Delete(ctx context.Context, resourceGroupName string, workspaceName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, workspaceName string) (result machinelearningservices.Workspace, err error)
+ ListByResourceGroup(ctx context.Context, resourceGroupName string, skiptoken string) (result machinelearningservices.WorkspaceListResultPage, err error)
+ ListBySubscription(ctx context.Context, skiptoken string) (result machinelearningservices.WorkspaceListResultPage, err error)
+ ListKeys(ctx context.Context, resourceGroupName string, workspaceName string) (result machinelearningservices.ListWorkspaceKeysResult, err error)
+ ResyncKeys(ctx context.Context, resourceGroupName string, workspaceName string) (result autorest.Response, err error)
+ Update(ctx context.Context, resourceGroupName string, workspaceName string, parameters machinelearningservices.WorkspaceUpdateParameters) (result machinelearningservices.Workspace, err error)
+}
+
+var _ WorkspacesClientAPI = (*machinelearningservices.WorkspacesClient)(nil)
+
+// UsagesClientAPI contains the set of methods on the UsagesClient type.
+type UsagesClientAPI interface {
+ List(ctx context.Context, location string) (result machinelearningservices.ListUsagesResultPage, err error)
+}
+
+var _ UsagesClientAPI = (*machinelearningservices.UsagesClient)(nil)
+
+// VirtualMachineSizesClientAPI contains the set of methods on the VirtualMachineSizesClient type.
+type VirtualMachineSizesClientAPI interface {
+ List(ctx context.Context, location string) (result machinelearningservices.VirtualMachineSizeListResult, err error)
+}
+
+var _ VirtualMachineSizesClientAPI = (*machinelearningservices.VirtualMachineSizesClient)(nil)
+
+// MachineLearningComputeClientAPI contains the set of methods on the MachineLearningComputeClient type.
+type MachineLearningComputeClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, parameters machinelearningservices.ComputeResource) (result machinelearningservices.MachineLearningComputeCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, underlyingResourceAction machinelearningservices.UnderlyingResourceAction) (result machinelearningservices.MachineLearningComputeDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, workspaceName string, computeName string) (result machinelearningservices.ComputeResource, err error)
+ ListByWorkspace(ctx context.Context, resourceGroupName string, workspaceName string, skiptoken string) (result machinelearningservices.PaginatedComputeResourcesListPage, err error)
+ ListKeys(ctx context.Context, resourceGroupName string, workspaceName string, computeName string) (result machinelearningservices.ComputeSecretsModel, err error)
+ ListNodes(ctx context.Context, resourceGroupName string, workspaceName string, computeName string) (result machinelearningservices.AmlComputeNodesInformation, err error)
+ Update(ctx context.Context, resourceGroupName string, workspaceName string, computeName string, parameters machinelearningservices.ClusterUpdateParameters) (result machinelearningservices.MachineLearningComputeUpdateFuture, err error)
+}
+
+var _ MachineLearningComputeClientAPI = (*machinelearningservices.MachineLearningComputeClient)(nil)
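These interfaces let callers depend on an abstraction that can be faked in tests. A hedged sketch of a consumer written against MachineLearningComputeClientAPI, using the standard page-iteration methods of the generated list type:

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/machinelearningservicesapi"
)

// countComputes tallies every compute in a workspace, crossing page boundaries manually.
func countComputes(ctx context.Context, api machinelearningservicesapi.MachineLearningComputeClientAPI, resourceGroup, workspace string) (int, error) {
	page, err := api.ListByWorkspace(ctx, resourceGroup, workspace, "")
	if err != nil {
		return 0, err
	}
	total := 0
	for page.NotDone() {
		total += len(page.Values())
		if err := page.NextWithContext(ctx); err != nil {
			return total, err
		}
	}
	return total, nil
}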
diff --git a/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/models.go b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/models.go
new file mode 100644
index 000000000000..4281ddbfef3b
--- /dev/null
+++ b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/models.go
@@ -0,0 +1,2665 @@
+package machinelearningservices
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices"
+
+// AllocationState enumerates the values for allocation state.
+type AllocationState string
+
+const (
+ // Resizing ...
+ Resizing AllocationState = "Resizing"
+ // Steady ...
+ Steady AllocationState = "Steady"
+)
+
+// PossibleAllocationStateValues returns an array of possible values for the AllocationState const type.
+func PossibleAllocationStateValues() []AllocationState {
+ return []AllocationState{Resizing, Steady}
+}
+
+// ComputeType enumerates the values for compute type.
+type ComputeType string
+
+const (
+ // ComputeTypeAKS ...
+ ComputeTypeAKS ComputeType = "AKS"
+ // ComputeTypeAmlCompute ...
+ ComputeTypeAmlCompute ComputeType = "AmlCompute"
+ // ComputeTypeDatabricks ...
+ ComputeTypeDatabricks ComputeType = "Databricks"
+ // ComputeTypeDataFactory ...
+ ComputeTypeDataFactory ComputeType = "DataFactory"
+ // ComputeTypeDataLakeAnalytics ...
+ ComputeTypeDataLakeAnalytics ComputeType = "DataLakeAnalytics"
+ // ComputeTypeHDInsight ...
+ ComputeTypeHDInsight ComputeType = "HDInsight"
+ // ComputeTypeVirtualMachine ...
+ ComputeTypeVirtualMachine ComputeType = "VirtualMachine"
+)
+
+// PossibleComputeTypeValues returns an array of possible values for the ComputeType const type.
+func PossibleComputeTypeValues() []ComputeType {
+ return []ComputeType{ComputeTypeAKS, ComputeTypeAmlCompute, ComputeTypeDatabricks, ComputeTypeDataFactory, ComputeTypeDataLakeAnalytics, ComputeTypeHDInsight, ComputeTypeVirtualMachine}
+}
+
+// ComputeTypeBasicCompute enumerates the values for compute type basic compute.
+type ComputeTypeBasicCompute string
+
+const (
+ // ComputeTypeAKS1 ...
+ ComputeTypeAKS1 ComputeTypeBasicCompute = "AKS"
+ // ComputeTypeAmlCompute1 ...
+ ComputeTypeAmlCompute1 ComputeTypeBasicCompute = "AmlCompute"
+ // ComputeTypeCompute ...
+ ComputeTypeCompute ComputeTypeBasicCompute = "Compute"
+ // ComputeTypeDatabricks1 ...
+ ComputeTypeDatabricks1 ComputeTypeBasicCompute = "Databricks"
+ // ComputeTypeDataFactory1 ...
+ ComputeTypeDataFactory1 ComputeTypeBasicCompute = "DataFactory"
+ // ComputeTypeDataLakeAnalytics1 ...
+ ComputeTypeDataLakeAnalytics1 ComputeTypeBasicCompute = "DataLakeAnalytics"
+ // ComputeTypeHDInsight1 ...
+ ComputeTypeHDInsight1 ComputeTypeBasicCompute = "HDInsight"
+ // ComputeTypeVirtualMachine1 ...
+ ComputeTypeVirtualMachine1 ComputeTypeBasicCompute = "VirtualMachine"
+)
+
+// PossibleComputeTypeBasicComputeValues returns an array of possible values for the ComputeTypeBasicCompute const type.
+func PossibleComputeTypeBasicComputeValues() []ComputeTypeBasicCompute {
+ return []ComputeTypeBasicCompute{ComputeTypeAKS1, ComputeTypeAmlCompute1, ComputeTypeCompute, ComputeTypeDatabricks1, ComputeTypeDataFactory1, ComputeTypeDataLakeAnalytics1, ComputeTypeHDInsight1, ComputeTypeVirtualMachine1}
+}
+
+// ComputeTypeBasicComputeNodesInformation enumerates the values for compute type basic compute nodes
+// information.
+type ComputeTypeBasicComputeNodesInformation string
+
+const (
+ // ComputeTypeBasicComputeNodesInformationComputeTypeAmlCompute ...
+ ComputeTypeBasicComputeNodesInformationComputeTypeAmlCompute ComputeTypeBasicComputeNodesInformation = "AmlCompute"
+ // ComputeTypeBasicComputeNodesInformationComputeTypeComputeNodesInformation ...
+ ComputeTypeBasicComputeNodesInformationComputeTypeComputeNodesInformation ComputeTypeBasicComputeNodesInformation = "ComputeNodesInformation"
+)
+
+// PossibleComputeTypeBasicComputeNodesInformationValues returns an array of possible values for the ComputeTypeBasicComputeNodesInformation const type.
+func PossibleComputeTypeBasicComputeNodesInformationValues() []ComputeTypeBasicComputeNodesInformation {
+ return []ComputeTypeBasicComputeNodesInformation{ComputeTypeBasicComputeNodesInformationComputeTypeAmlCompute, ComputeTypeBasicComputeNodesInformationComputeTypeComputeNodesInformation}
+}
+
+// ComputeTypeBasicComputeSecrets enumerates the values for compute type basic compute secrets.
+type ComputeTypeBasicComputeSecrets string
+
+const (
+ // ComputeTypeBasicComputeSecretsComputeTypeAKS ...
+ ComputeTypeBasicComputeSecretsComputeTypeAKS ComputeTypeBasicComputeSecrets = "AKS"
+ // ComputeTypeBasicComputeSecretsComputeTypeComputeSecrets ...
+ ComputeTypeBasicComputeSecretsComputeTypeComputeSecrets ComputeTypeBasicComputeSecrets = "ComputeSecrets"
+ // ComputeTypeBasicComputeSecretsComputeTypeDatabricks ...
+ ComputeTypeBasicComputeSecretsComputeTypeDatabricks ComputeTypeBasicComputeSecrets = "Databricks"
+ // ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine ...
+ ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine ComputeTypeBasicComputeSecrets = "VirtualMachine"
+)
+
+// PossibleComputeTypeBasicComputeSecretsValues returns an array of possible values for the ComputeTypeBasicComputeSecrets const type.
+func PossibleComputeTypeBasicComputeSecretsValues() []ComputeTypeBasicComputeSecrets {
+ return []ComputeTypeBasicComputeSecrets{ComputeTypeBasicComputeSecretsComputeTypeAKS, ComputeTypeBasicComputeSecretsComputeTypeComputeSecrets, ComputeTypeBasicComputeSecretsComputeTypeDatabricks, ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine}
+}
+
+// NodeState enumerates the values for node state.
+type NodeState string
+
+const (
+ // Idle ...
+ Idle NodeState = "idle"
+ // Leaving ...
+ Leaving NodeState = "leaving"
+ // Preempted ...
+ Preempted NodeState = "preempted"
+ // Preparing ...
+ Preparing NodeState = "preparing"
+ // Running ...
+ Running NodeState = "running"
+ // Unusable ...
+ Unusable NodeState = "unusable"
+)
+
+// PossibleNodeStateValues returns an array of possible values for the NodeState const type.
+func PossibleNodeStateValues() []NodeState {
+ return []NodeState{Idle, Leaving, Preempted, Preparing, Running, Unusable}
+}
+
+// ProvisioningState enumerates the values for provisioning state.
+type ProvisioningState string
+
+const (
+ // Canceled ...
+ Canceled ProvisioningState = "Canceled"
+ // Creating ...
+ Creating ProvisioningState = "Creating"
+ // Deleting ...
+ Deleting ProvisioningState = "Deleting"
+ // Failed ...
+ Failed ProvisioningState = "Failed"
+ // Succeeded ...
+ Succeeded ProvisioningState = "Succeeded"
+ // Unknown ...
+ Unknown ProvisioningState = "Unknown"
+ // Updating ...
+ Updating ProvisioningState = "Updating"
+)
+
+// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type.
+func PossibleProvisioningStateValues() []ProvisioningState {
+ return []ProvisioningState{Canceled, Creating, Deleting, Failed, Succeeded, Unknown, Updating}
+}
+
+// RemoteLoginPortPublicAccess enumerates the values for remote login port public access.
+type RemoteLoginPortPublicAccess string
+
+const (
+ // Disabled ...
+ Disabled RemoteLoginPortPublicAccess = "Disabled"
+ // Enabled ...
+ Enabled RemoteLoginPortPublicAccess = "Enabled"
+ // NotSpecified ...
+ NotSpecified RemoteLoginPortPublicAccess = "NotSpecified"
+)
+
+// PossibleRemoteLoginPortPublicAccessValues returns an array of possible values for the RemoteLoginPortPublicAccess const type.
+func PossibleRemoteLoginPortPublicAccessValues() []RemoteLoginPortPublicAccess {
+ return []RemoteLoginPortPublicAccess{Disabled, Enabled, NotSpecified}
+}
+
+// ResourceIdentityType enumerates the values for resource identity type.
+type ResourceIdentityType string
+
+const (
+ // SystemAssigned ...
+ SystemAssigned ResourceIdentityType = "SystemAssigned"
+)
+
+// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type.
+func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
+ return []ResourceIdentityType{SystemAssigned}
+}
+
+// Status enumerates the values for status.
+type Status string
+
+const (
+ // StatusDisabled ...
+ StatusDisabled Status = "Disabled"
+ // StatusEnabled ...
+ StatusEnabled Status = "Enabled"
+)
+
+// PossibleStatusValues returns an array of possible values for the Status const type.
+func PossibleStatusValues() []Status {
+ return []Status{StatusDisabled, StatusEnabled}
+}
+
+// UnderlyingResourceAction enumerates the values for underlying resource action.
+type UnderlyingResourceAction string
+
+const (
+ // Delete ...
+ Delete UnderlyingResourceAction = "Delete"
+ // Detach ...
+ Detach UnderlyingResourceAction = "Detach"
+)
+
+// PossibleUnderlyingResourceActionValues returns an array of possible values for the UnderlyingResourceAction const type.
+func PossibleUnderlyingResourceActionValues() []UnderlyingResourceAction {
+ return []UnderlyingResourceAction{Delete, Detach}
+}
+
+// UsageUnit enumerates the values for usage unit.
+type UsageUnit string
+
+const (
+ // Count ...
+ Count UsageUnit = "Count"
+)
+
+// PossibleUsageUnitValues returns an array of possible values for the UsageUnit const type.
+func PossibleUsageUnitValues() []UsageUnit {
+ return []UsageUnit{Count}
+}
+
+// VMPriority enumerates the values for vm priority.
+type VMPriority string
+
+const (
+ // Dedicated ...
+ Dedicated VMPriority = "Dedicated"
+ // LowPriority ...
+ LowPriority VMPriority = "LowPriority"
+)
+
+// PossibleVMPriorityValues returns an array of possible values for the VMPriority const type.
+func PossibleVMPriorityValues() []VMPriority {
+ return []VMPriority{Dedicated, LowPriority}
+}
+
+// AKS a Machine Learning compute based on AKS.
+type AKS struct {
+ // Properties - AKS properties
+ Properties *AKSProperties `json:"properties,omitempty"`
+ // ComputeLocation - Location for the underlying compute
+ ComputeLocation *string `json:"computeLocation,omitempty"`
+	// ProvisioningState - READ-ONLY; The provisioning state of the cluster. Possible values include: 'Unknown', 'Updating', 'Creating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Description - The description of the Machine Learning compute.
+ Description *string `json:"description,omitempty"`
+ // CreatedOn - READ-ONLY; The date and time when the compute was created.
+ CreatedOn *date.Time `json:"createdOn,omitempty"`
+ // ModifiedOn - READ-ONLY; The date and time when the compute was last modified.
+ ModifiedOn *date.Time `json:"modifiedOn,omitempty"`
+ // ResourceID - ARM resource id of the underlying compute
+ ResourceID *string `json:"resourceId,omitempty"`
+ // ProvisioningErrors - READ-ONLY; Errors during provisioning
+ ProvisioningErrors *[]Error `json:"provisioningErrors,omitempty"`
+	// IsAttachedCompute - READ-ONLY; Indicates whether the compute was provisioned by the user and brought from outside if true, or provisioned by the machine learning service if false.
+ IsAttachedCompute *bool `json:"isAttachedCompute,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeCompute', 'ComputeTypeAKS1', 'ComputeTypeAmlCompute1', 'ComputeTypeVirtualMachine1', 'ComputeTypeHDInsight1', 'ComputeTypeDataFactory1', 'ComputeTypeDatabricks1', 'ComputeTypeDataLakeAnalytics1'
+ ComputeType ComputeTypeBasicCompute `json:"computeType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AKS.
+func (a AKS) MarshalJSON() ([]byte, error) {
+ a.ComputeType = ComputeTypeAKS1
+ objectMap := make(map[string]interface{})
+ if a.Properties != nil {
+ objectMap["properties"] = a.Properties
+ }
+ if a.ComputeLocation != nil {
+ objectMap["computeLocation"] = a.ComputeLocation
+ }
+ if a.Description != nil {
+ objectMap["description"] = a.Description
+ }
+ if a.ResourceID != nil {
+ objectMap["resourceId"] = a.ResourceID
+ }
+ if a.ComputeType != "" {
+ objectMap["computeType"] = a.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAKS is the BasicCompute implementation for AKS.
+func (a AKS) AsAKS() (*AKS, bool) {
+ return &a, true
+}
+
+// AsAmlCompute is the BasicCompute implementation for AKS.
+func (a AKS) AsAmlCompute() (*AmlCompute, bool) {
+ return nil, false
+}
+
+// AsVirtualMachine is the BasicCompute implementation for AKS.
+func (a AKS) AsVirtualMachine() (*VirtualMachine, bool) {
+ return nil, false
+}
+
+// AsHDInsight is the BasicCompute implementation for AKS.
+func (a AKS) AsHDInsight() (*HDInsight, bool) {
+ return nil, false
+}
+
+// AsDataFactory is the BasicCompute implementation for AKS.
+func (a AKS) AsDataFactory() (*DataFactory, bool) {
+ return nil, false
+}
+
+// AsDatabricks is the BasicCompute implementation for AKS.
+func (a AKS) AsDatabricks() (*Databricks, bool) {
+ return nil, false
+}
+
+// AsDataLakeAnalytics is the BasicCompute implementation for AKS.
+func (a AKS) AsDataLakeAnalytics() (*DataLakeAnalytics, bool) {
+ return nil, false
+}
+
+// AsCompute is the BasicCompute implementation for AKS.
+func (a AKS) AsCompute() (*Compute, bool) {
+ return nil, false
+}
+
+// AsBasicCompute is the BasicCompute implementation for AKS.
+func (a AKS) AsBasicCompute() (BasicCompute, bool) {
+ return &a, true
+}
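// Illustrative sketch, not generated: the Compute hierarchy is a discriminated union keyed on
// computeType, and the As* helpers above perform the type test for callers. Reading an
// AKS-specific field off a ComputeResource might look like the following; the ComputeResource
// shape (a Properties field holding a BasicCompute) is assumed from the generated pattern
// rather than shown in this hunk.
func clusterFqdn(resource ComputeResource) (string, bool) {
	if resource.Properties == nil {
		return "", false
	}
	if aks, isAKS := resource.Properties.AsAKS(); isAKS && aks.Properties != nil && aks.Properties.ClusterFqdn != nil {
		return *aks.Properties.ClusterFqdn, true
	}
	return "", false
}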
+
+// AksComputeSecrets secrets related to a Machine Learning compute based on AKS.
+type AksComputeSecrets struct {
+ // UserKubeConfig - Content of kubeconfig file that can be used to connect to the Kubernetes cluster.
+ UserKubeConfig *string `json:"userKubeConfig,omitempty"`
+ // AdminKubeConfig - Content of kubeconfig file that can be used to connect to the Kubernetes cluster.
+ AdminKubeConfig *string `json:"adminKubeConfig,omitempty"`
+ // ImagePullSecretName - Image registry pull secret.
+ ImagePullSecretName *string `json:"imagePullSecretName,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeBasicComputeSecretsComputeTypeComputeSecrets', 'ComputeTypeBasicComputeSecretsComputeTypeAKS', 'ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine', 'ComputeTypeBasicComputeSecretsComputeTypeDatabricks'
+ ComputeType ComputeTypeBasicComputeSecrets `json:"computeType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AksComputeSecrets.
+func (acs AksComputeSecrets) MarshalJSON() ([]byte, error) {
+ acs.ComputeType = ComputeTypeBasicComputeSecretsComputeTypeAKS
+ objectMap := make(map[string]interface{})
+ if acs.UserKubeConfig != nil {
+ objectMap["userKubeConfig"] = acs.UserKubeConfig
+ }
+ if acs.AdminKubeConfig != nil {
+ objectMap["adminKubeConfig"] = acs.AdminKubeConfig
+ }
+ if acs.ImagePullSecretName != nil {
+ objectMap["imagePullSecretName"] = acs.ImagePullSecretName
+ }
+ if acs.ComputeType != "" {
+ objectMap["computeType"] = acs.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAksComputeSecrets is the BasicComputeSecrets implementation for AksComputeSecrets.
+func (acs AksComputeSecrets) AsAksComputeSecrets() (*AksComputeSecrets, bool) {
+ return &acs, true
+}
+
+// AsVirtualMachineSecrets is the BasicComputeSecrets implementation for AksComputeSecrets.
+func (acs AksComputeSecrets) AsVirtualMachineSecrets() (*VirtualMachineSecrets, bool) {
+ return nil, false
+}
+
+// AsDatabricksComputeSecrets is the BasicComputeSecrets implementation for AksComputeSecrets.
+func (acs AksComputeSecrets) AsDatabricksComputeSecrets() (*DatabricksComputeSecrets, bool) {
+ return nil, false
+}
+
+// AsComputeSecrets is the BasicComputeSecrets implementation for AksComputeSecrets.
+func (acs AksComputeSecrets) AsComputeSecrets() (*ComputeSecrets, bool) {
+ return nil, false
+}
+
+// AsBasicComputeSecrets is the BasicComputeSecrets implementation for AksComputeSecrets.
+func (acs AksComputeSecrets) AsBasicComputeSecrets() (BasicComputeSecrets, bool) {
+ return &acs, true
+}
+
+// AksNetworkingConfiguration advanced configuration for AKS networking
+type AksNetworkingConfiguration struct {
+ // SubnetID - Virtual network subnet resource ID the compute nodes belong to
+ SubnetID *string `json:"subnetId,omitempty"`
+ // ServiceCidr - A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.
+ ServiceCidr *string `json:"serviceCidr,omitempty"`
+ // DNSServiceIP - An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr.
+ DNSServiceIP *string `json:"dnsServiceIP,omitempty"`
+ // DockerBridgeCidr - A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
+ DockerBridgeCidr *string `json:"dockerBridgeCidr,omitempty"`
+}
+
+// AKSProperties AKS properties
+type AKSProperties struct {
+ // ClusterFqdn - Cluster fully qualified domain name
+ ClusterFqdn *string `json:"clusterFqdn,omitempty"`
+ // SystemServices - READ-ONLY; System services
+ SystemServices *[]SystemService `json:"systemServices,omitempty"`
+ // AgentCount - Number of agents
+ AgentCount *int32 `json:"agentCount,omitempty"`
+ // AgentVMSize - Agent virtual machine size
+ AgentVMSize *string `json:"agentVMSize,omitempty"`
+ // SslConfiguration - SSL configuration
+ SslConfiguration *SslConfiguration `json:"sslConfiguration,omitempty"`
+ // AksNetworkingConfiguration - AKS networking configuration for vnet
+ AksNetworkingConfiguration *AksNetworkingConfiguration `json:"aksNetworkingConfiguration,omitempty"`
+}
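+
+// The AKS compute type above is attached to a workspace by populating AKSProperties and,
+// optionally, AksNetworkingConfiguration; its MarshalJSON stamps the computeType discriminator
+// automatically. A minimal sketch from a caller's point of view (the resource ID and CIDR
+// ranges below are placeholders, not values defined in this package):
+//
+//    aks := machinelearningservices.AKS{
+//        Description: to.StringPtr("attached AKS cluster"),
+//        ResourceID:  to.StringPtr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.ContainerService/managedClusters/<name>"),
+//        Properties: &machinelearningservices.AKSProperties{
+//            AgentCount:  to.Int32Ptr(3),
+//            AgentVMSize: to.StringPtr("Standard_D3_v2"),
+//            AksNetworkingConfiguration: &machinelearningservices.AksNetworkingConfiguration{
+//                ServiceCidr:      to.StringPtr("10.0.0.0/16"),
+//                DNSServiceIP:     to.StringPtr("10.0.0.10"),
+//                DockerBridgeCidr: to.StringPtr("172.17.0.1/16"),
+//            },
+//        },
+//    }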
+
+// AmlCompute an Azure Machine Learning compute.
+type AmlCompute struct {
+ // Properties - AML Compute properties
+ Properties *AmlComputeProperties `json:"properties,omitempty"`
+ // ComputeLocation - Location for the underlying compute
+ ComputeLocation *string `json:"computeLocation,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state of the cluster. Valid values are Unknown, Updating, Creating, Deleting, Succeeded, Failed, and Canceled. Possible values include: 'Unknown', 'Updating', 'Creating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Description - The description of the Machine Learning compute.
+ Description *string `json:"description,omitempty"`
+ // CreatedOn - READ-ONLY; The date and time when the compute was created.
+ CreatedOn *date.Time `json:"createdOn,omitempty"`
+ // ModifiedOn - READ-ONLY; The date and time when the compute was last modified.
+ ModifiedOn *date.Time `json:"modifiedOn,omitempty"`
+ // ResourceID - ARM resource id of the underlying compute
+ ResourceID *string `json:"resourceId,omitempty"`
+ // ProvisioningErrors - READ-ONLY; Errors during provisioning
+ ProvisioningErrors *[]Error `json:"provisioningErrors,omitempty"`
+ // IsAttachedCompute - READ-ONLY; Indicates whether the compute was provisioned by the user and brought from outside if true, or provisioned by the machine learning service if false.
+ IsAttachedCompute *bool `json:"isAttachedCompute,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeCompute', 'ComputeTypeAKS1', 'ComputeTypeAmlCompute1', 'ComputeTypeVirtualMachine1', 'ComputeTypeHDInsight1', 'ComputeTypeDataFactory1', 'ComputeTypeDatabricks1', 'ComputeTypeDataLakeAnalytics1'
+ ComputeType ComputeTypeBasicCompute `json:"computeType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AmlCompute.
+func (ac AmlCompute) MarshalJSON() ([]byte, error) {
+ ac.ComputeType = ComputeTypeAmlCompute1
+ objectMap := make(map[string]interface{})
+ if ac.Properties != nil {
+ objectMap["properties"] = ac.Properties
+ }
+ if ac.ComputeLocation != nil {
+ objectMap["computeLocation"] = ac.ComputeLocation
+ }
+ if ac.Description != nil {
+ objectMap["description"] = ac.Description
+ }
+ if ac.ResourceID != nil {
+ objectMap["resourceId"] = ac.ResourceID
+ }
+ if ac.ComputeType != "" {
+ objectMap["computeType"] = ac.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAKS is the BasicCompute implementation for AmlCompute.
+func (ac AmlCompute) AsAKS() (*AKS, bool) {
+ return nil, false
+}
+
+// AsAmlCompute is the BasicCompute implementation for AmlCompute.
+func (ac AmlCompute) AsAmlCompute() (*AmlCompute, bool) {
+ return &ac, true
+}
+
+// AsVirtualMachine is the BasicCompute implementation for AmlCompute.
+func (ac AmlCompute) AsVirtualMachine() (*VirtualMachine, bool) {
+ return nil, false
+}
+
+// AsHDInsight is the BasicCompute implementation for AmlCompute.
+func (ac AmlCompute) AsHDInsight() (*HDInsight, bool) {
+ return nil, false
+}
+
+// AsDataFactory is the BasicCompute implementation for AmlCompute.
+func (ac AmlCompute) AsDataFactory() (*DataFactory, bool) {
+ return nil, false
+}
+
+// AsDatabricks is the BasicCompute implementation for AmlCompute.
+func (ac AmlCompute) AsDatabricks() (*Databricks, bool) {
+ return nil, false
+}
+
+// AsDataLakeAnalytics is the BasicCompute implementation for AmlCompute.
+func (ac AmlCompute) AsDataLakeAnalytics() (*DataLakeAnalytics, bool) {
+ return nil, false
+}
+
+// AsCompute is the BasicCompute implementation for AmlCompute.
+func (ac AmlCompute) AsCompute() (*Compute, bool) {
+ return nil, false
+}
+
+// AsBasicCompute is the BasicCompute implementation for AmlCompute.
+func (ac AmlCompute) AsBasicCompute() (BasicCompute, bool) {
+ return &ac, true
+}
+
+// AmlComputeNodeInformation compute node information related to an AmlCompute.
+type AmlComputeNodeInformation struct {
+ // NodeID - READ-ONLY; ID of the compute node.
+ NodeID *string `json:"nodeId,omitempty"`
+ // PrivateIPAddress - READ-ONLY; Private IP address of the compute node.
+ PrivateIPAddress *string `json:"privateIpAddress,omitempty"`
+ // PublicIPAddress - READ-ONLY; Public IP address of the compute node.
+ PublicIPAddress *string `json:"publicIpAddress,omitempty"`
+ // Port - READ-ONLY; SSH port number of the node.
+ Port *float64 `json:"port,omitempty"`
+ // NodeState - READ-ONLY; State of the compute node. Values are idle, running, preparing, unusable, leaving and preempted. Possible values include: 'Idle', 'Running', 'Preparing', 'Unusable', 'Leaving', 'Preempted'
+ NodeState NodeState `json:"nodeState,omitempty"`
+ // RunID - READ-ONLY; ID of the Experiment running on the node, if any; otherwise null.
+ RunID *string `json:"runId,omitempty"`
+}
+
+// AmlComputeNodesInformation compute node information related to an AmlCompute.
+type AmlComputeNodesInformation struct {
+ autorest.Response `json:"-"`
+ // Nodes - READ-ONLY; The collection of returned AmlCompute nodes details.
+ Nodes *[]AmlComputeNodeInformation `json:"nodes,omitempty"`
+ // NextLink - READ-ONLY; The continuation token.
+ NextLink *string `json:"nextLink,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeBasicComputeNodesInformationComputeTypeComputeNodesInformation', 'ComputeTypeBasicComputeNodesInformationComputeTypeAmlCompute'
+ ComputeType ComputeTypeBasicComputeNodesInformation `json:"computeType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AmlComputeNodesInformation.
+func (acni AmlComputeNodesInformation) MarshalJSON() ([]byte, error) {
+ acni.ComputeType = ComputeTypeBasicComputeNodesInformationComputeTypeAmlCompute
+ objectMap := make(map[string]interface{})
+ if acni.ComputeType != "" {
+ objectMap["computeType"] = acni.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAmlComputeNodesInformation is the BasicComputeNodesInformation implementation for AmlComputeNodesInformation.
+func (acni AmlComputeNodesInformation) AsAmlComputeNodesInformation() (*AmlComputeNodesInformation, bool) {
+ return &acni, true
+}
+
+// AsComputeNodesInformation is the BasicComputeNodesInformation implementation for AmlComputeNodesInformation.
+func (acni AmlComputeNodesInformation) AsComputeNodesInformation() (*ComputeNodesInformation, bool) {
+ return nil, false
+}
+
+// AsBasicComputeNodesInformation is the BasicComputeNodesInformation implementation for AmlComputeNodesInformation.
+func (acni AmlComputeNodesInformation) AsBasicComputeNodesInformation() (BasicComputeNodesInformation, bool) {
+ return &acni, true
+}
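+
+// AmlComputeNodesInformation is the shape returned when listing the nodes of an amlCompute
+// cluster (for example via a ListNodes-style operation on MachineLearningComputeClient; the
+// exact client method is assumed here, not defined in this file). A minimal sketch of reading
+// such a response:
+//
+//    if nodesInfo.Nodes != nil {
+//        for _, n := range *nodesInfo.Nodes {
+//            // n.PrivateIPAddress, n.Port and n.NodeState describe how to reach the node and
+//            // what it is currently doing; n.RunID is set while an experiment runs on it.
+//            _ = n
+//        }
+//    }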
+
+// AmlComputeProperties AML Compute properties
+type AmlComputeProperties struct {
+ // VMSize - Virtual Machine Size
+ VMSize *string `json:"vmSize,omitempty"`
+ // VMPriority - Virtual Machine priority. Possible values include: 'Dedicated', 'LowPriority'
+ VMPriority VMPriority `json:"vmPriority,omitempty"`
+ // ScaleSettings - Scale settings for AML Compute
+ ScaleSettings *ScaleSettings `json:"scaleSettings,omitempty"`
+ // UserAccountCredentials - Credentials for an administrator user account that will be created on each compute node.
+ UserAccountCredentials *UserAccountCredentials `json:"userAccountCredentials,omitempty"`
+ // Subnet - Virtual network subnet resource ID the compute nodes belong to.
+ Subnet *ResourceID `json:"subnet,omitempty"`
+ // RemoteLoginPortPublicAccess - State of the public SSH port. Possible values are: Disabled - Indicates that the public SSH port is closed on all nodes of the cluster. Enabled - Indicates that the public SSH port is open on all nodes of the cluster. NotSpecified - Indicates that the public SSH port is closed on all nodes of the cluster if a VNet is defined, else it is open on all public nodes. NotSpecified can be chosen only during cluster creation time; after creation the state will be either Enabled or Disabled. Possible values include: 'Enabled', 'Disabled', 'NotSpecified'
+ RemoteLoginPortPublicAccess RemoteLoginPortPublicAccess `json:"remoteLoginPortPublicAccess,omitempty"`
+ // AllocationState - READ-ONLY; Allocation state of the compute. Possible values are: steady - Indicates that the compute is not resizing. There are no changes to the number of compute nodes in the compute in progress. A compute enters this state when it is created and when no operations are being performed on the compute to change the number of compute nodes. resizing - Indicates that the compute is resizing; that is, compute nodes are being added to or removed from the compute. Possible values include: 'Steady', 'Resizing'
+ AllocationState AllocationState `json:"allocationState,omitempty"`
+ // AllocationStateTransitionTime - READ-ONLY; The time at which the compute entered its current allocation state.
+ AllocationStateTransitionTime *date.Time `json:"allocationStateTransitionTime,omitempty"`
+ // Errors - READ-ONLY; Collection of errors encountered by various compute nodes during node setup.
+ Errors *[]Error `json:"errors,omitempty"`
+ // CurrentNodeCount - READ-ONLY; The number of compute nodes currently assigned to the compute.
+ CurrentNodeCount *int32 `json:"currentNodeCount,omitempty"`
+ // TargetNodeCount - READ-ONLY; The target number of compute nodes for the compute. If the allocationState is resizing, this property denotes the target node count for the ongoing resize operation. If the allocationState is steady, this property denotes the target node count for the previous resize operation.
+ TargetNodeCount *int32 `json:"targetNodeCount,omitempty"`
+ // NodeStateCounts - READ-ONLY; Counts of various node states on the compute.
+ NodeStateCounts *NodeStateCounts `json:"nodeStateCounts,omitempty"`
+}
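+
+// AmlComputeProperties carries the user-settable shape of an amlCompute cluster (VM size,
+// priority, scale settings, optional subnet and SSH access policy); the remaining fields are
+// read-only status reported by the service. A minimal sketch, leaving the ScaleSettings body to
+// that type's own definition elsewhere in this package; the enum constant identifiers are
+// assumed to match the quoted values listed above ('Dedicated', 'NotSpecified'):
+//
+//    props := machinelearningservices.AmlComputeProperties{
+//        VMSize:                      to.StringPtr("STANDARD_NC6"),
+//        VMPriority:                  machinelearningservices.Dedicated,
+//        ScaleSettings:               &machinelearningservices.ScaleSettings{ /* min/max node counts, idle timeout */ },
+//        RemoteLoginPortPublicAccess: machinelearningservices.NotSpecified,
+//    }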
+
+// ClusterUpdateParameters amlCompute update parameters.
+type ClusterUpdateParameters struct {
+ // ClusterUpdateProperties - The properties of the amlCompute.
+ *ClusterUpdateProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ClusterUpdateParameters.
+func (cup ClusterUpdateParameters) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if cup.ClusterUpdateProperties != nil {
+ objectMap["properties"] = cup.ClusterUpdateProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ClusterUpdateParameters struct.
+func (cup *ClusterUpdateParameters) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var clusterUpdateProperties ClusterUpdateProperties
+ err = json.Unmarshal(*v, &clusterUpdateProperties)
+ if err != nil {
+ return err
+ }
+ cup.ClusterUpdateProperties = &clusterUpdateProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// ClusterUpdateProperties the properties of an amlCompute that need to be updated.
+type ClusterUpdateProperties struct {
+ // ScaleSettings - Desired scale settings for the amlCompute.
+ ScaleSettings *ScaleSettings `json:"scaleSettings,omitempty"`
+}
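+
+// ClusterUpdateParameters embeds *ClusterUpdateProperties, so the desired scale settings are
+// written under "properties" by the custom marshaler above. A minimal sketch of building the
+// update payload (the Update client method that would consume it is assumed, not shown here):
+//
+//    params := machinelearningservices.ClusterUpdateParameters{
+//        ClusterUpdateProperties: &machinelearningservices.ClusterUpdateProperties{
+//            ScaleSettings: &machinelearningservices.ScaleSettings{ /* desired node counts */ },
+//        },
+//    }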
+
+// BasicCompute machine Learning compute object.
+type BasicCompute interface {
+ AsAKS() (*AKS, bool)
+ AsAmlCompute() (*AmlCompute, bool)
+ AsVirtualMachine() (*VirtualMachine, bool)
+ AsHDInsight() (*HDInsight, bool)
+ AsDataFactory() (*DataFactory, bool)
+ AsDatabricks() (*Databricks, bool)
+ AsDataLakeAnalytics() (*DataLakeAnalytics, bool)
+ AsCompute() (*Compute, bool)
+}
+
+// Compute machine Learning compute object.
+type Compute struct {
+ // ComputeLocation - Location for the underlying compute
+ ComputeLocation *string `json:"computeLocation,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state of the cluster. Valid values are Unknown, Updating, Creating, Deleting, Succeeded, Failed, and Canceled. Possible values include: 'Unknown', 'Updating', 'Creating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Description - The description of the Machine Learning compute.
+ Description *string `json:"description,omitempty"`
+ // CreatedOn - READ-ONLY; The date and time when the compute was created.
+ CreatedOn *date.Time `json:"createdOn,omitempty"`
+ // ModifiedOn - READ-ONLY; The date and time when the compute was last modified.
+ ModifiedOn *date.Time `json:"modifiedOn,omitempty"`
+ // ResourceID - ARM resource id of the underlying compute
+ ResourceID *string `json:"resourceId,omitempty"`
+ // ProvisioningErrors - READ-ONLY; Errors during provisioning
+ ProvisioningErrors *[]Error `json:"provisioningErrors,omitempty"`
+ // IsAttachedCompute - READ-ONLY; Indicates whether the compute was provisioned by the user and brought from outside if true, or provisioned by the machine learning service if false.
+ IsAttachedCompute *bool `json:"isAttachedCompute,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeCompute', 'ComputeTypeAKS1', 'ComputeTypeAmlCompute1', 'ComputeTypeVirtualMachine1', 'ComputeTypeHDInsight1', 'ComputeTypeDataFactory1', 'ComputeTypeDatabricks1', 'ComputeTypeDataLakeAnalytics1'
+ ComputeType ComputeTypeBasicCompute `json:"computeType,omitempty"`
+}
+
+func unmarshalBasicCompute(body []byte) (BasicCompute, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["computeType"] {
+ case string(ComputeTypeAKS1):
+ var a AKS
+ err := json.Unmarshal(body, &a)
+ return a, err
+ case string(ComputeTypeAmlCompute1):
+ var ac AmlCompute
+ err := json.Unmarshal(body, &ac)
+ return ac, err
+ case string(ComputeTypeVirtualMachine1):
+ var VM VirtualMachine
+ err := json.Unmarshal(body, &VM)
+ return VM, err
+ case string(ComputeTypeHDInsight1):
+ var hi HDInsight
+ err := json.Unmarshal(body, &hi)
+ return hi, err
+ case string(ComputeTypeDataFactory1):
+ var df DataFactory
+ err := json.Unmarshal(body, &df)
+ return df, err
+ case string(ComputeTypeDatabricks1):
+ var d Databricks
+ err := json.Unmarshal(body, &d)
+ return d, err
+ case string(ComputeTypeDataLakeAnalytics1):
+ var dla DataLakeAnalytics
+ err := json.Unmarshal(body, &dla)
+ return dla, err
+ default:
+ var c Compute
+ err := json.Unmarshal(body, &c)
+ return c, err
+ }
+}
+func unmarshalBasicComputeArray(body []byte) ([]BasicCompute, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ cArray := make([]BasicCompute, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ c, err := unmarshalBasicCompute(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ cArray[index] = c
+ }
+ return cArray, nil
+}
+
+// MarshalJSON is the custom marshaler for Compute.
+func (c Compute) MarshalJSON() ([]byte, error) {
+ c.ComputeType = ComputeTypeCompute
+ objectMap := make(map[string]interface{})
+ if c.ComputeLocation != nil {
+ objectMap["computeLocation"] = c.ComputeLocation
+ }
+ if c.Description != nil {
+ objectMap["description"] = c.Description
+ }
+ if c.ResourceID != nil {
+ objectMap["resourceId"] = c.ResourceID
+ }
+ if c.ComputeType != "" {
+ objectMap["computeType"] = c.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAKS is the BasicCompute implementation for Compute.
+func (c Compute) AsAKS() (*AKS, bool) {
+ return nil, false
+}
+
+// AsAmlCompute is the BasicCompute implementation for Compute.
+func (c Compute) AsAmlCompute() (*AmlCompute, bool) {
+ return nil, false
+}
+
+// AsVirtualMachine is the BasicCompute implementation for Compute.
+func (c Compute) AsVirtualMachine() (*VirtualMachine, bool) {
+ return nil, false
+}
+
+// AsHDInsight is the BasicCompute implementation for Compute.
+func (c Compute) AsHDInsight() (*HDInsight, bool) {
+ return nil, false
+}
+
+// AsDataFactory is the BasicCompute implementation for Compute.
+func (c Compute) AsDataFactory() (*DataFactory, bool) {
+ return nil, false
+}
+
+// AsDatabricks is the BasicCompute implementation for Compute.
+func (c Compute) AsDatabricks() (*Databricks, bool) {
+ return nil, false
+}
+
+// AsDataLakeAnalytics is the BasicCompute implementation for Compute.
+func (c Compute) AsDataLakeAnalytics() (*DataLakeAnalytics, bool) {
+ return nil, false
+}
+
+// AsCompute is the BasicCompute implementation for Compute.
+func (c Compute) AsCompute() (*Compute, bool) {
+ return &c, true
+}
+
+// AsBasicCompute is the BasicCompute implementation for Compute.
+func (c Compute) AsBasicCompute() (BasicCompute, bool) {
+ return &c, true
+}
+
+// BasicComputeNodesInformation compute nodes information related to a Machine Learning compute. Might differ for every
+// type of compute.
+type BasicComputeNodesInformation interface {
+ AsAmlComputeNodesInformation() (*AmlComputeNodesInformation, bool)
+ AsComputeNodesInformation() (*ComputeNodesInformation, bool)
+}
+
+// ComputeNodesInformation compute nodes information related to a Machine Learning compute. Might differ for
+// every type of compute.
+type ComputeNodesInformation struct {
+ // NextLink - READ-ONLY; The continuation token.
+ NextLink *string `json:"nextLink,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeBasicComputeNodesInformationComputeTypeComputeNodesInformation', 'ComputeTypeBasicComputeNodesInformationComputeTypeAmlCompute'
+ ComputeType ComputeTypeBasicComputeNodesInformation `json:"computeType,omitempty"`
+}
+
+func unmarshalBasicComputeNodesInformation(body []byte) (BasicComputeNodesInformation, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["computeType"] {
+ case string(ComputeTypeBasicComputeNodesInformationComputeTypeAmlCompute):
+ var acni AmlComputeNodesInformation
+ err := json.Unmarshal(body, &acni)
+ return acni, err
+ default:
+ var cni ComputeNodesInformation
+ err := json.Unmarshal(body, &cni)
+ return cni, err
+ }
+}
+func unmarshalBasicComputeNodesInformationArray(body []byte) ([]BasicComputeNodesInformation, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ cniArray := make([]BasicComputeNodesInformation, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ cni, err := unmarshalBasicComputeNodesInformation(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ cniArray[index] = cni
+ }
+ return cniArray, nil
+}
+
+// MarshalJSON is the custom marshaler for ComputeNodesInformation.
+func (cni ComputeNodesInformation) MarshalJSON() ([]byte, error) {
+ cni.ComputeType = ComputeTypeBasicComputeNodesInformationComputeTypeComputeNodesInformation
+ objectMap := make(map[string]interface{})
+ if cni.ComputeType != "" {
+ objectMap["computeType"] = cni.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAmlComputeNodesInformation is the BasicComputeNodesInformation implementation for ComputeNodesInformation.
+func (cni ComputeNodesInformation) AsAmlComputeNodesInformation() (*AmlComputeNodesInformation, bool) {
+ return nil, false
+}
+
+// AsComputeNodesInformation is the BasicComputeNodesInformation implementation for ComputeNodesInformation.
+func (cni ComputeNodesInformation) AsComputeNodesInformation() (*ComputeNodesInformation, bool) {
+ return &cni, true
+}
+
+// AsBasicComputeNodesInformation is the BasicComputeNodesInformation implementation for ComputeNodesInformation.
+func (cni ComputeNodesInformation) AsBasicComputeNodesInformation() (BasicComputeNodesInformation, bool) {
+ return &cni, true
+}
+
+// ComputeResource machine Learning compute object wrapped into ARM resource envelope.
+type ComputeResource struct {
+ autorest.Response `json:"-"`
+ // Properties - Compute properties
+ Properties BasicCompute `json:"properties,omitempty"`
+ // ID - READ-ONLY; Specifies the resource ID.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Specifies the name of the resource.
+ Name *string `json:"name,omitempty"`
+ // Identity - READ-ONLY; The identity of the resource.
+ Identity *Identity `json:"identity,omitempty"`
+ // Location - Specifies the location of the resource.
+ Location *string `json:"location,omitempty"`
+ // Type - READ-ONLY; Specifies the type of the resource.
+ Type *string `json:"type,omitempty"`
+ // Tags - Contains resource tags defined as key/value pairs.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for ComputeResource.
+func (cr ComputeResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ objectMap["properties"] = cr.Properties
+ if cr.Location != nil {
+ objectMap["location"] = cr.Location
+ }
+ if cr.Tags != nil {
+ objectMap["tags"] = cr.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ComputeResource struct.
+func (cr *ComputeResource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ properties, err := unmarshalBasicCompute(*v)
+ if err != nil {
+ return err
+ }
+ cr.Properties = properties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ cr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ cr.Name = &name
+ }
+ case "identity":
+ if v != nil {
+ var identity Identity
+ err = json.Unmarshal(*v, &identity)
+ if err != nil {
+ return err
+ }
+ cr.Identity = &identity
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ cr.Location = &location
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ cr.Type = &typeVar
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ cr.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
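+
+// ComputeResource.Properties is typed as the BasicCompute interface, and the custom unmarshaler
+// above routes the raw JSON through unmarshalBasicCompute using the computeType discriminator.
+// Callers recover the concrete type with the As* helpers; a minimal sketch (body is an assumed
+// raw ARM response, not something defined in this file):
+//
+//    var cr machinelearningservices.ComputeResource
+//    if err := json.Unmarshal(body, &cr); err == nil && cr.Properties != nil {
+//        if aks, ok := cr.Properties.AsAKS(); ok {
+//            _ = aks.Properties // AKS-specific fields (e.g. ClusterFqdn) live here
+//        } else if aml, ok := cr.Properties.AsAmlCompute(); ok {
+//            _ = aml.Properties // amlCompute-specific fields (e.g. ScaleSettings) live here
+//        }
+//    }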
+
+// BasicComputeSecrets secrets related to a Machine Learning compute. Might differ for every type of compute.
+type BasicComputeSecrets interface {
+ AsAksComputeSecrets() (*AksComputeSecrets, bool)
+ AsVirtualMachineSecrets() (*VirtualMachineSecrets, bool)
+ AsDatabricksComputeSecrets() (*DatabricksComputeSecrets, bool)
+ AsComputeSecrets() (*ComputeSecrets, bool)
+}
+
+// ComputeSecrets secrets related to a Machine Learning compute. Might differ for every type of compute.
+type ComputeSecrets struct {
+ autorest.Response `json:"-"`
+ // ComputeType - Possible values include: 'ComputeTypeBasicComputeSecretsComputeTypeComputeSecrets', 'ComputeTypeBasicComputeSecretsComputeTypeAKS', 'ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine', 'ComputeTypeBasicComputeSecretsComputeTypeDatabricks'
+ ComputeType ComputeTypeBasicComputeSecrets `json:"computeType,omitempty"`
+}
+
+func unmarshalBasicComputeSecrets(body []byte) (BasicComputeSecrets, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["computeType"] {
+ case string(ComputeTypeBasicComputeSecretsComputeTypeAKS):
+ var acs AksComputeSecrets
+ err := json.Unmarshal(body, &acs)
+ return acs, err
+ case string(ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine):
+ var vms VirtualMachineSecrets
+ err := json.Unmarshal(body, &vms)
+ return vms, err
+ case string(ComputeTypeBasicComputeSecretsComputeTypeDatabricks):
+ var dcs DatabricksComputeSecrets
+ err := json.Unmarshal(body, &dcs)
+ return dcs, err
+ default:
+ var cs ComputeSecrets
+ err := json.Unmarshal(body, &cs)
+ return cs, err
+ }
+}
+func unmarshalBasicComputeSecretsArray(body []byte) ([]BasicComputeSecrets, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ csArray := make([]BasicComputeSecrets, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ cs, err := unmarshalBasicComputeSecrets(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ csArray[index] = cs
+ }
+ return csArray, nil
+}
+
+// MarshalJSON is the custom marshaler for ComputeSecrets.
+func (cs ComputeSecrets) MarshalJSON() ([]byte, error) {
+ cs.ComputeType = ComputeTypeBasicComputeSecretsComputeTypeComputeSecrets
+ objectMap := make(map[string]interface{})
+ if cs.ComputeType != "" {
+ objectMap["computeType"] = cs.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAksComputeSecrets is the BasicComputeSecrets implementation for ComputeSecrets.
+func (cs ComputeSecrets) AsAksComputeSecrets() (*AksComputeSecrets, bool) {
+ return nil, false
+}
+
+// AsVirtualMachineSecrets is the BasicComputeSecrets implementation for ComputeSecrets.
+func (cs ComputeSecrets) AsVirtualMachineSecrets() (*VirtualMachineSecrets, bool) {
+ return nil, false
+}
+
+// AsDatabricksComputeSecrets is the BasicComputeSecrets implementation for ComputeSecrets.
+func (cs ComputeSecrets) AsDatabricksComputeSecrets() (*DatabricksComputeSecrets, bool) {
+ return nil, false
+}
+
+// AsComputeSecrets is the BasicComputeSecrets implementation for ComputeSecrets.
+func (cs ComputeSecrets) AsComputeSecrets() (*ComputeSecrets, bool) {
+ return &cs, true
+}
+
+// AsBasicComputeSecrets is the BasicComputeSecrets implementation for ComputeSecrets.
+func (cs ComputeSecrets) AsBasicComputeSecrets() (BasicComputeSecrets, bool) {
+ return &cs, true
+}
+
+// ComputeSecretsModel ...
+type ComputeSecretsModel struct {
+ autorest.Response `json:"-"`
+ Value BasicComputeSecrets `json:"value,omitempty"`
+}
+
+// UnmarshalJSON is the custom unmarshaler for ComputeSecretsModel struct.
+func (csm *ComputeSecretsModel) UnmarshalJSON(body []byte) error {
+ cs, err := unmarshalBasicComputeSecrets(body)
+ if err != nil {
+ return err
+ }
+ csm.Value = cs
+
+ return nil
+}
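+
+// ComputeSecretsModel wraps the polymorphic BasicComputeSecrets returned when listing the keys
+// of a compute (e.g. a ListKeys-style operation; the client method name is assumed here). The
+// concrete secret type is recovered with the As* helpers; a minimal sketch:
+//
+//    if aksSecrets, ok := secretsModel.Value.AsAksComputeSecrets(); ok {
+//        _ = aksSecrets.UserKubeConfig // kubeconfig content for the AKS cluster
+//    } else if dbxSecrets, ok := secretsModel.Value.AsDatabricksComputeSecrets(); ok {
+//        _ = dbxSecrets.DatabricksAccessToken
+//    }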
+
+// Databricks a Databricks compute.
+type Databricks struct {
+ Properties *DatabricksProperties `json:"properties,omitempty"`
+ // ComputeLocation - Location for the underlying compute
+ ComputeLocation *string `json:"computeLocation,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state of the cluster. Valid values are Unknown, Updating, Creating, Deleting, Succeeded, Failed, and Canceled. Possible values include: 'Unknown', 'Updating', 'Creating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Description - The description of the Machine Learning compute.
+ Description *string `json:"description,omitempty"`
+ // CreatedOn - READ-ONLY; The date and time when the compute was created.
+ CreatedOn *date.Time `json:"createdOn,omitempty"`
+ // ModifiedOn - READ-ONLY; The date and time when the compute was last modified.
+ ModifiedOn *date.Time `json:"modifiedOn,omitempty"`
+ // ResourceID - ARM resource id of the underlying compute
+ ResourceID *string `json:"resourceId,omitempty"`
+ // ProvisioningErrors - READ-ONLY; Errors during provisioning
+ ProvisioningErrors *[]Error `json:"provisioningErrors,omitempty"`
+ // IsAttachedCompute - READ-ONLY; Indicates whether the compute was provisioned by the user and brought from outside if true, or provisioned by the machine learning service if false.
+ IsAttachedCompute *bool `json:"isAttachedCompute,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeCompute', 'ComputeTypeAKS1', 'ComputeTypeAmlCompute1', 'ComputeTypeVirtualMachine1', 'ComputeTypeHDInsight1', 'ComputeTypeDataFactory1', 'ComputeTypeDatabricks1', 'ComputeTypeDataLakeAnalytics1'
+ ComputeType ComputeTypeBasicCompute `json:"computeType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Databricks.
+func (d Databricks) MarshalJSON() ([]byte, error) {
+ d.ComputeType = ComputeTypeDatabricks1
+ objectMap := make(map[string]interface{})
+ if d.Properties != nil {
+ objectMap["properties"] = d.Properties
+ }
+ if d.ComputeLocation != nil {
+ objectMap["computeLocation"] = d.ComputeLocation
+ }
+ if d.Description != nil {
+ objectMap["description"] = d.Description
+ }
+ if d.ResourceID != nil {
+ objectMap["resourceId"] = d.ResourceID
+ }
+ if d.ComputeType != "" {
+ objectMap["computeType"] = d.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAKS is the BasicCompute implementation for Databricks.
+func (d Databricks) AsAKS() (*AKS, bool) {
+ return nil, false
+}
+
+// AsAmlCompute is the BasicCompute implementation for Databricks.
+func (d Databricks) AsAmlCompute() (*AmlCompute, bool) {
+ return nil, false
+}
+
+// AsVirtualMachine is the BasicCompute implementation for Databricks.
+func (d Databricks) AsVirtualMachine() (*VirtualMachine, bool) {
+ return nil, false
+}
+
+// AsHDInsight is the BasicCompute implementation for Databricks.
+func (d Databricks) AsHDInsight() (*HDInsight, bool) {
+ return nil, false
+}
+
+// AsDataFactory is the BasicCompute implementation for Databricks.
+func (d Databricks) AsDataFactory() (*DataFactory, bool) {
+ return nil, false
+}
+
+// AsDatabricks is the BasicCompute implementation for Databricks.
+func (d Databricks) AsDatabricks() (*Databricks, bool) {
+ return &d, true
+}
+
+// AsDataLakeAnalytics is the BasicCompute implementation for Databricks.
+func (d Databricks) AsDataLakeAnalytics() (*DataLakeAnalytics, bool) {
+ return nil, false
+}
+
+// AsCompute is the BasicCompute implementation for Databricks.
+func (d Databricks) AsCompute() (*Compute, bool) {
+ return nil, false
+}
+
+// AsBasicCompute is the BasicCompute implementation for Databricks.
+func (d Databricks) AsBasicCompute() (BasicCompute, bool) {
+ return &d, true
+}
+
+// DatabricksComputeSecrets secrets related to a Machine Learning compute based on Databricks.
+type DatabricksComputeSecrets struct {
+ // DatabricksAccessToken - Access token for the Databricks account.
+ DatabricksAccessToken *string `json:"databricksAccessToken,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeBasicComputeSecretsComputeTypeComputeSecrets', 'ComputeTypeBasicComputeSecretsComputeTypeAKS', 'ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine', 'ComputeTypeBasicComputeSecretsComputeTypeDatabricks'
+ ComputeType ComputeTypeBasicComputeSecrets `json:"computeType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for DatabricksComputeSecrets.
+func (dcs DatabricksComputeSecrets) MarshalJSON() ([]byte, error) {
+ dcs.ComputeType = ComputeTypeBasicComputeSecretsComputeTypeDatabricks
+ objectMap := make(map[string]interface{})
+ if dcs.DatabricksAccessToken != nil {
+ objectMap["databricksAccessToken"] = dcs.DatabricksAccessToken
+ }
+ if dcs.ComputeType != "" {
+ objectMap["computeType"] = dcs.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAksComputeSecrets is the BasicComputeSecrets implementation for DatabricksComputeSecrets.
+func (dcs DatabricksComputeSecrets) AsAksComputeSecrets() (*AksComputeSecrets, bool) {
+ return nil, false
+}
+
+// AsVirtualMachineSecrets is the BasicComputeSecrets implementation for DatabricksComputeSecrets.
+func (dcs DatabricksComputeSecrets) AsVirtualMachineSecrets() (*VirtualMachineSecrets, bool) {
+ return nil, false
+}
+
+// AsDatabricksComputeSecrets is the BasicComputeSecrets implementation for DatabricksComputeSecrets.
+func (dcs DatabricksComputeSecrets) AsDatabricksComputeSecrets() (*DatabricksComputeSecrets, bool) {
+ return &dcs, true
+}
+
+// AsComputeSecrets is the BasicComputeSecrets implementation for DatabricksComputeSecrets.
+func (dcs DatabricksComputeSecrets) AsComputeSecrets() (*ComputeSecrets, bool) {
+ return nil, false
+}
+
+// AsBasicComputeSecrets is the BasicComputeSecrets implementation for DatabricksComputeSecrets.
+func (dcs DatabricksComputeSecrets) AsBasicComputeSecrets() (BasicComputeSecrets, bool) {
+ return &dcs, true
+}
+
+// DatabricksProperties ...
+type DatabricksProperties struct {
+ // DatabricksAccessToken - Databricks access token
+ DatabricksAccessToken *string `json:"databricksAccessToken,omitempty"`
+}
+
+// DataFactory a DataFactory compute.
+type DataFactory struct {
+ // ComputeLocation - Location for the underlying compute
+ ComputeLocation *string `json:"computeLocation,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state of the cluster. Valid values are Unknown, Updating, Creating, Deleting, Succeeded, Failed, and Canceled. Possible values include: 'Unknown', 'Updating', 'Creating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Description - The description of the Machine Learning compute.
+ Description *string `json:"description,omitempty"`
+ // CreatedOn - READ-ONLY; The date and time when the compute was created.
+ CreatedOn *date.Time `json:"createdOn,omitempty"`
+ // ModifiedOn - READ-ONLY; The date and time when the compute was last modified.
+ ModifiedOn *date.Time `json:"modifiedOn,omitempty"`
+ // ResourceID - ARM resource id of the underlying compute
+ ResourceID *string `json:"resourceId,omitempty"`
+ // ProvisioningErrors - READ-ONLY; Errors during provisioning
+ ProvisioningErrors *[]Error `json:"provisioningErrors,omitempty"`
+ // IsAttachedCompute - READ-ONLY; Indicates whether the compute was provisioned by the user and brought from outside if true, or provisioned by the machine learning service if false.
+ IsAttachedCompute *bool `json:"isAttachedCompute,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeCompute', 'ComputeTypeAKS1', 'ComputeTypeAmlCompute1', 'ComputeTypeVirtualMachine1', 'ComputeTypeHDInsight1', 'ComputeTypeDataFactory1', 'ComputeTypeDatabricks1', 'ComputeTypeDataLakeAnalytics1'
+ ComputeType ComputeTypeBasicCompute `json:"computeType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for DataFactory.
+func (df DataFactory) MarshalJSON() ([]byte, error) {
+ df.ComputeType = ComputeTypeDataFactory1
+ objectMap := make(map[string]interface{})
+ if df.ComputeLocation != nil {
+ objectMap["computeLocation"] = df.ComputeLocation
+ }
+ if df.Description != nil {
+ objectMap["description"] = df.Description
+ }
+ if df.ResourceID != nil {
+ objectMap["resourceId"] = df.ResourceID
+ }
+ if df.ComputeType != "" {
+ objectMap["computeType"] = df.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAKS is the BasicCompute implementation for DataFactory.
+func (df DataFactory) AsAKS() (*AKS, bool) {
+ return nil, false
+}
+
+// AsAmlCompute is the BasicCompute implementation for DataFactory.
+func (df DataFactory) AsAmlCompute() (*AmlCompute, bool) {
+ return nil, false
+}
+
+// AsVirtualMachine is the BasicCompute implementation for DataFactory.
+func (df DataFactory) AsVirtualMachine() (*VirtualMachine, bool) {
+ return nil, false
+}
+
+// AsHDInsight is the BasicCompute implementation for DataFactory.
+func (df DataFactory) AsHDInsight() (*HDInsight, bool) {
+ return nil, false
+}
+
+// AsDataFactory is the BasicCompute implementation for DataFactory.
+func (df DataFactory) AsDataFactory() (*DataFactory, bool) {
+ return &df, true
+}
+
+// AsDatabricks is the BasicCompute implementation for DataFactory.
+func (df DataFactory) AsDatabricks() (*Databricks, bool) {
+ return nil, false
+}
+
+// AsDataLakeAnalytics is the BasicCompute implementation for DataFactory.
+func (df DataFactory) AsDataLakeAnalytics() (*DataLakeAnalytics, bool) {
+ return nil, false
+}
+
+// AsCompute is the BasicCompute implementation for DataFactory.
+func (df DataFactory) AsCompute() (*Compute, bool) {
+ return nil, false
+}
+
+// AsBasicCompute is the BasicCompute implementation for DataFactory.
+func (df DataFactory) AsBasicCompute() (BasicCompute, bool) {
+ return &df, true
+}
+
+// DataLakeAnalytics a DataLakeAnalytics compute.
+type DataLakeAnalytics struct {
+ Properties *DataLakeAnalyticsProperties `json:"properties,omitempty"`
+ // ComputeLocation - Location for the underlying compute
+ ComputeLocation *string `json:"computeLocation,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state of the cluster. Valid values are Unknown, Updating, Creating, Deleting, Succeeded, Failed, and Canceled. Possible values include: 'Unknown', 'Updating', 'Creating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Description - The description of the Machine Learning compute.
+ Description *string `json:"description,omitempty"`
+ // CreatedOn - READ-ONLY; The date and time when the compute was created.
+ CreatedOn *date.Time `json:"createdOn,omitempty"`
+ // ModifiedOn - READ-ONLY; The date and time when the compute was last modified.
+ ModifiedOn *date.Time `json:"modifiedOn,omitempty"`
+ // ResourceID - ARM resource id of the underlying compute
+ ResourceID *string `json:"resourceId,omitempty"`
+ // ProvisioningErrors - READ-ONLY; Errors during provisioning
+ ProvisioningErrors *[]Error `json:"provisioningErrors,omitempty"`
+ // IsAttachedCompute - READ-ONLY; Indicates whether the compute was provisioned by the user and brought from outside if true, or provisioned by the machine learning service if false.
+ IsAttachedCompute *bool `json:"isAttachedCompute,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeCompute', 'ComputeTypeAKS1', 'ComputeTypeAmlCompute1', 'ComputeTypeVirtualMachine1', 'ComputeTypeHDInsight1', 'ComputeTypeDataFactory1', 'ComputeTypeDatabricks1', 'ComputeTypeDataLakeAnalytics1'
+ ComputeType ComputeTypeBasicCompute `json:"computeType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for DataLakeAnalytics.
+func (dla DataLakeAnalytics) MarshalJSON() ([]byte, error) {
+ dla.ComputeType = ComputeTypeDataLakeAnalytics1
+ objectMap := make(map[string]interface{})
+ if dla.Properties != nil {
+ objectMap["properties"] = dla.Properties
+ }
+ if dla.ComputeLocation != nil {
+ objectMap["computeLocation"] = dla.ComputeLocation
+ }
+ if dla.Description != nil {
+ objectMap["description"] = dla.Description
+ }
+ if dla.ResourceID != nil {
+ objectMap["resourceId"] = dla.ResourceID
+ }
+ if dla.ComputeType != "" {
+ objectMap["computeType"] = dla.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAKS is the BasicCompute implementation for DataLakeAnalytics.
+func (dla DataLakeAnalytics) AsAKS() (*AKS, bool) {
+ return nil, false
+}
+
+// AsAmlCompute is the BasicCompute implementation for DataLakeAnalytics.
+func (dla DataLakeAnalytics) AsAmlCompute() (*AmlCompute, bool) {
+ return nil, false
+}
+
+// AsVirtualMachine is the BasicCompute implementation for DataLakeAnalytics.
+func (dla DataLakeAnalytics) AsVirtualMachine() (*VirtualMachine, bool) {
+ return nil, false
+}
+
+// AsHDInsight is the BasicCompute implementation for DataLakeAnalytics.
+func (dla DataLakeAnalytics) AsHDInsight() (*HDInsight, bool) {
+ return nil, false
+}
+
+// AsDataFactory is the BasicCompute implementation for DataLakeAnalytics.
+func (dla DataLakeAnalytics) AsDataFactory() (*DataFactory, bool) {
+ return nil, false
+}
+
+// AsDatabricks is the BasicCompute implementation for DataLakeAnalytics.
+func (dla DataLakeAnalytics) AsDatabricks() (*Databricks, bool) {
+ return nil, false
+}
+
+// AsDataLakeAnalytics is the BasicCompute implementation for DataLakeAnalytics.
+func (dla DataLakeAnalytics) AsDataLakeAnalytics() (*DataLakeAnalytics, bool) {
+ return &dla, true
+}
+
+// AsCompute is the BasicCompute implementation for DataLakeAnalytics.
+func (dla DataLakeAnalytics) AsCompute() (*Compute, bool) {
+ return nil, false
+}
+
+// AsBasicCompute is the BasicCompute implementation for DataLakeAnalytics.
+func (dla DataLakeAnalytics) AsBasicCompute() (BasicCompute, bool) {
+ return &dla, true
+}
+
+// DataLakeAnalyticsProperties ...
+type DataLakeAnalyticsProperties struct {
+ // DataLakeStoreAccountName - DataLake Store Account Name
+ DataLakeStoreAccountName *string `json:"dataLakeStoreAccountName,omitempty"`
+}
+
+// Error wrapper for error response to follow ARM guidelines.
+type Error struct {
+ // Error - READ-ONLY; The error response.
+ Error *ErrorResponse `json:"error,omitempty"`
+}
+
+// ErrorDetail error detail information.
+type ErrorDetail struct {
+ // Code - Error code.
+ Code *string `json:"code,omitempty"`
+ // Message - Error message.
+ Message *string `json:"message,omitempty"`
+}
+
+// ErrorResponse error response information.
+type ErrorResponse struct {
+ // Code - READ-ONLY; Error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; Error message.
+ Message *string `json:"message,omitempty"`
+ // Details - READ-ONLY; An array of error detail objects.
+ Details *[]ErrorDetail `json:"details,omitempty"`
+}
+
+// HDInsight an HDInsight compute.
+type HDInsight struct {
+ Properties *HDInsightProperties `json:"properties,omitempty"`
+ // ComputeLocation - Location for the underlying compute
+ ComputeLocation *string `json:"computeLocation,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state of the cluster. Valid values are Unknown, Updating, Creating, Deleting, Succeeded, Failed, and Canceled. Possible values include: 'Unknown', 'Updating', 'Creating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Description - The description of the Machine Learning compute.
+ Description *string `json:"description,omitempty"`
+ // CreatedOn - READ-ONLY; The date and time when the compute was created.
+ CreatedOn *date.Time `json:"createdOn,omitempty"`
+ // ModifiedOn - READ-ONLY; The date and time when the compute was last modified.
+ ModifiedOn *date.Time `json:"modifiedOn,omitempty"`
+ // ResourceID - ARM resource id of the underlying compute
+ ResourceID *string `json:"resourceId,omitempty"`
+ // ProvisioningErrors - READ-ONLY; Errors during provisioning
+ ProvisioningErrors *[]Error `json:"provisioningErrors,omitempty"`
+ // IsAttachedCompute - READ-ONLY; Indicates whether the compute was provisioned by the user and brought from outside if true, or provisioned by the machine learning service if false.
+ IsAttachedCompute *bool `json:"isAttachedCompute,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeCompute', 'ComputeTypeAKS1', 'ComputeTypeAmlCompute1', 'ComputeTypeVirtualMachine1', 'ComputeTypeHDInsight1', 'ComputeTypeDataFactory1', 'ComputeTypeDatabricks1', 'ComputeTypeDataLakeAnalytics1'
+ ComputeType ComputeTypeBasicCompute `json:"computeType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for HDInsight.
+func (hi HDInsight) MarshalJSON() ([]byte, error) {
+ hi.ComputeType = ComputeTypeHDInsight1
+ objectMap := make(map[string]interface{})
+ if hi.Properties != nil {
+ objectMap["properties"] = hi.Properties
+ }
+ if hi.ComputeLocation != nil {
+ objectMap["computeLocation"] = hi.ComputeLocation
+ }
+ if hi.Description != nil {
+ objectMap["description"] = hi.Description
+ }
+ if hi.ResourceID != nil {
+ objectMap["resourceId"] = hi.ResourceID
+ }
+ if hi.ComputeType != "" {
+ objectMap["computeType"] = hi.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAKS is the BasicCompute implementation for HDInsight.
+func (hi HDInsight) AsAKS() (*AKS, bool) {
+ return nil, false
+}
+
+// AsAmlCompute is the BasicCompute implementation for HDInsight.
+func (hi HDInsight) AsAmlCompute() (*AmlCompute, bool) {
+ return nil, false
+}
+
+// AsVirtualMachine is the BasicCompute implementation for HDInsight.
+func (hi HDInsight) AsVirtualMachine() (*VirtualMachine, bool) {
+ return nil, false
+}
+
+// AsHDInsight is the BasicCompute implementation for HDInsight.
+func (hi HDInsight) AsHDInsight() (*HDInsight, bool) {
+ return &hi, true
+}
+
+// AsDataFactory is the BasicCompute implementation for HDInsight.
+func (hi HDInsight) AsDataFactory() (*DataFactory, bool) {
+ return nil, false
+}
+
+// AsDatabricks is the BasicCompute implementation for HDInsight.
+func (hi HDInsight) AsDatabricks() (*Databricks, bool) {
+ return nil, false
+}
+
+// AsDataLakeAnalytics is the BasicCompute implementation for HDInsight.
+func (hi HDInsight) AsDataLakeAnalytics() (*DataLakeAnalytics, bool) {
+ return nil, false
+}
+
+// AsCompute is the BasicCompute implementation for HDInsight.
+func (hi HDInsight) AsCompute() (*Compute, bool) {
+ return nil, false
+}
+
+// AsBasicCompute is the BasicCompute implementation for HDInsight.
+func (hi HDInsight) AsBasicCompute() (BasicCompute, bool) {
+ return &hi, true
+}
+
+// HDInsightProperties ...
+type HDInsightProperties struct {
+ // SSHPort - Port open for ssh connections on the master node of the cluster.
+ SSHPort *int32 `json:"sshPort,omitempty"`
+ // Address - Public IP address of the master node of the cluster.
+ Address *string `json:"address,omitempty"`
+ // AdministratorAccount - Admin credentials for master node of the cluster
+ AdministratorAccount *VirtualMachineSSHCredentials `json:"administratorAccount,omitempty"`
+}
+
+// Identity identity for the resource.
+type Identity struct {
+ // PrincipalID - READ-ONLY; The principal ID of resource identity.
+ PrincipalID *string `json:"principalId,omitempty"`
+ // TenantID - READ-ONLY; The tenant ID of resource.
+ TenantID *string `json:"tenantId,omitempty"`
+ // Type - The identity type. Possible values include: 'SystemAssigned'
+ Type ResourceIdentityType `json:"type,omitempty"`
+}
+
+// ListUsagesResult the List Usages operation response.
+type ListUsagesResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; The list of AML resource usages.
+ Value *[]Usage `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URI to fetch the next page of AML resource usage information. Call ListNext() with this to fetch the next page of AML resource usage information.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ListUsagesResultIterator provides access to a complete listing of Usage values.
+type ListUsagesResultIterator struct {
+ i int
+ page ListUsagesResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListUsagesResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListUsagesResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListUsagesResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListUsagesResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListUsagesResultIterator) Response() ListUsagesResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListUsagesResultIterator) Value() Usage {
+ if !iter.page.NotDone() {
+ return Usage{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ListUsagesResultIterator type.
+func NewListUsagesResultIterator(page ListUsagesResultPage) ListUsagesResultIterator {
+ return ListUsagesResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListUsagesResult contains no values.
+func (lur ListUsagesResult) IsEmpty() bool {
+ return lur.Value == nil || len(*lur.Value) == 0
+}
+
+// listUsagesResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lur ListUsagesResult) listUsagesResultPreparer(ctx context.Context) (*http.Request, error) {
+ if lur.NextLink == nil || len(to.String(lur.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lur.NextLink)))
+}
+
+// ListUsagesResultPage contains a page of Usage values.
+type ListUsagesResultPage struct {
+ fn func(context.Context, ListUsagesResult) (ListUsagesResult, error)
+ lur ListUsagesResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListUsagesResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListUsagesResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.lur)
+ if err != nil {
+ return err
+ }
+ page.lur = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListUsagesResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListUsagesResultPage) NotDone() bool {
+ return !page.lur.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListUsagesResultPage) Response() ListUsagesResult {
+ return page.lur
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListUsagesResultPage) Values() []Usage {
+ if page.lur.IsEmpty() {
+ return nil
+ }
+ return *page.lur.Value
+}
+
+// Creates a new instance of the ListUsagesResultPage type.
+func NewListUsagesResultPage(getNextPage func(context.Context, ListUsagesResult) (ListUsagesResult, error)) ListUsagesResultPage {
+ return ListUsagesResultPage{fn: getNextPage}
+}
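+
+// The iterator/page pair above follows the standard go-autorest paging pattern: a page holds one
+// ListUsagesResult and knows how to fetch the next one, while the iterator walks the values across
+// pages. A minimal sketch, assuming a usages client whose ListComplete method returns a
+// ListUsagesResultIterator (the client and method names are assumptions, not defined in this file):
+//
+//    iter, err := usagesClient.ListComplete(ctx, "eastus")
+//    for err == nil && iter.NotDone() {
+//        usage := iter.Value()
+//        _ = usage // inspect the Usage fields here
+//        err = iter.NextWithContext(ctx)
+//    }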
+
+// ListWorkspaceKeysResult ...
+type ListWorkspaceKeysResult struct {
+ autorest.Response `json:"-"`
+ // UserStorageKey - READ-ONLY
+ UserStorageKey *string `json:"userStorageKey,omitempty"`
+ // UserStorageResourceID - READ-ONLY
+ UserStorageResourceID *string `json:"userStorageResourceId,omitempty"`
+ // AppInsightsInstrumentationKey - READ-ONLY
+ AppInsightsInstrumentationKey *string `json:"appInsightsInstrumentationKey,omitempty"`
+ // ContainerRegistryCredentials - READ-ONLY
+ ContainerRegistryCredentials *RegistryListCredentialsResult `json:"containerRegistryCredentials,omitempty"`
+}
+
+// MachineLearningComputeCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type MachineLearningComputeCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *MachineLearningComputeCreateOrUpdateFuture) Result(client MachineLearningComputeClient) (cr ComputeResource, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("machinelearningservices.MachineLearningComputeCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if cr.Response.Response, err = future.GetResult(sender); err == nil && cr.Response.Response.StatusCode != http.StatusNoContent {
+ cr, err = client.CreateOrUpdateResponder(cr.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeCreateOrUpdateFuture", "Result", cr.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
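+
+// MachineLearningComputeCreateOrUpdateFuture follows the usual long-running-operation pattern:
+// the caller gets the future back from the initiating call, waits for completion, then asks the
+// future for the typed result. A minimal sketch, assuming the usual CreateOrUpdate signature on
+// MachineLearningComputeClient (resource group, workspace, compute name, parameters):
+//
+//    future, err := computeClient.CreateOrUpdate(ctx, rg, workspace, computeName, parameters)
+//    if err == nil {
+//        err = future.WaitForCompletionRef(ctx, computeClient.Client)
+//    }
+//    if err == nil {
+//        cr, resultErr := future.Result(computeClient)
+//        _, _ = cr, resultErr
+//    }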
+
+// MachineLearningComputeDeleteFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type MachineLearningComputeDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *MachineLearningComputeDeleteFuture) Result(client MachineLearningComputeClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("machinelearningservices.MachineLearningComputeDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// MachineLearningComputeUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type MachineLearningComputeUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *MachineLearningComputeUpdateFuture) Result(client MachineLearningComputeClient) (cr ComputeResource, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("machinelearningservices.MachineLearningComputeUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if cr.Response.Response, err = future.GetResult(sender); err == nil && cr.Response.Response.StatusCode != http.StatusNoContent {
+ cr, err = client.UpdateResponder(cr.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.MachineLearningComputeUpdateFuture", "Result", cr.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
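+
+// Waiting on these futures follows the usual go-autorest long-running-operation pattern. A
+// minimal sketch, assuming an authenticated MachineLearningComputeClient (defined elsewhere in
+// this package) whose CreateOrUpdate call returns a MachineLearningComputeCreateOrUpdateFuture;
+// resource names are illustrative and error handling is abbreviated:
+//
+//    future, err := computeClient.CreateOrUpdate(ctx, "myResourceGroup", "myWorkspace", "myCompute", parameters)
+//    if err != nil {
+//        // handle request error
+//    }
+//    if err = future.WaitForCompletionRef(ctx, computeClient.Client); err != nil {
+//        // handle polling error
+//    }
+//    cr, err := future.Result(computeClient)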
+
+// NodeStateCounts counts of various compute node states on the amlCompute.
+type NodeStateCounts struct {
+ // IdleNodeCount - READ-ONLY; Number of compute nodes in idle state.
+ IdleNodeCount *int32 `json:"idleNodeCount,omitempty"`
+ // RunningNodeCount - READ-ONLY; Number of compute nodes which are running jobs.
+ RunningNodeCount *int32 `json:"runningNodeCount,omitempty"`
+ // PreparingNodeCount - READ-ONLY; Number of compute nodes which are being prepared.
+ PreparingNodeCount *int32 `json:"preparingNodeCount,omitempty"`
+ // UnusableNodeCount - READ-ONLY; Number of compute nodes which are in unusable state.
+ UnusableNodeCount *int32 `json:"unusableNodeCount,omitempty"`
+ // LeavingNodeCount - READ-ONLY; Number of compute nodes which are leaving the amlCompute.
+ LeavingNodeCount *int32 `json:"leavingNodeCount,omitempty"`
+ // PreemptedNodeCount - READ-ONLY; Number of compute nodes which are in preempted state.
+ PreemptedNodeCount *int32 `json:"preemptedNodeCount,omitempty"`
+}
+
+// Operation Azure Machine Learning workspace REST API operation
+type Operation struct {
+ // Name - Operation name: {provider}/{resource}/{operation}
+ Name *string `json:"name,omitempty"`
+ // Display - Display name of operation
+ Display *OperationDisplay `json:"display,omitempty"`
+}
+
+// OperationDisplay display name of operation
+type OperationDisplay struct {
+ // Provider - The resource provider name: Microsoft.MachineLearningExperimentation
+ Provider *string `json:"provider,omitempty"`
+ // Resource - The resource on which the operation is performed.
+ Resource *string `json:"resource,omitempty"`
+ // Operation - The operation that users can perform.
+ Operation *string `json:"operation,omitempty"`
+ // Description - The description for the operation.
+ Description *string `json:"description,omitempty"`
+}
+
+// OperationListResult an array of operations supported by the resource provider.
+type OperationListResult struct {
+ autorest.Response `json:"-"`
+ // Value - List of AML workspace operations supported by the AML workspace resource provider.
+ Value *[]Operation `json:"value,omitempty"`
+}
+
+// PaginatedComputeResourcesList paginated list of Machine Learning compute objects wrapped in ARM resource
+// envelope.
+type PaginatedComputeResourcesList struct {
+ autorest.Response `json:"-"`
+ // Value - An array of Machine Learning compute objects wrapped in ARM resource envelope.
+ Value *[]ComputeResource `json:"value,omitempty"`
+ // NextLink - A continuation link (absolute URI) to the next page of results in the list.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// PaginatedComputeResourcesListIterator provides access to a complete listing of ComputeResource values.
+type PaginatedComputeResourcesListIterator struct {
+ i int
+ page PaginatedComputeResourcesListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *PaginatedComputeResourcesListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PaginatedComputeResourcesListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *PaginatedComputeResourcesListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter PaginatedComputeResourcesListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter PaginatedComputeResourcesListIterator) Response() PaginatedComputeResourcesList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter PaginatedComputeResourcesListIterator) Value() ComputeResource {
+ if !iter.page.NotDone() {
+ return ComputeResource{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewPaginatedComputeResourcesListIterator creates a new instance of the PaginatedComputeResourcesListIterator type.
+func NewPaginatedComputeResourcesListIterator(page PaginatedComputeResourcesListPage) PaginatedComputeResourcesListIterator {
+ return PaginatedComputeResourcesListIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (pcrl PaginatedComputeResourcesList) IsEmpty() bool {
+ return pcrl.Value == nil || len(*pcrl.Value) == 0
+}
+
+// paginatedComputeResourcesListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (pcrl PaginatedComputeResourcesList) paginatedComputeResourcesListPreparer(ctx context.Context) (*http.Request, error) {
+ if pcrl.NextLink == nil || len(to.String(pcrl.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(pcrl.NextLink)))
+}
+
+// PaginatedComputeResourcesListPage contains a page of ComputeResource values.
+type PaginatedComputeResourcesListPage struct {
+ fn func(context.Context, PaginatedComputeResourcesList) (PaginatedComputeResourcesList, error)
+ pcrl PaginatedComputeResourcesList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *PaginatedComputeResourcesListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PaginatedComputeResourcesListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.pcrl)
+ if err != nil {
+ return err
+ }
+ page.pcrl = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *PaginatedComputeResourcesListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page PaginatedComputeResourcesListPage) NotDone() bool {
+ return !page.pcrl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page PaginatedComputeResourcesListPage) Response() PaginatedComputeResourcesList {
+ return page.pcrl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page PaginatedComputeResourcesListPage) Values() []ComputeResource {
+ if page.pcrl.IsEmpty() {
+ return nil
+ }
+ return *page.pcrl.Value
+}
+
+// NewPaginatedComputeResourcesListPage creates a new instance of the PaginatedComputeResourcesListPage type.
+func NewPaginatedComputeResourcesListPage(getNextPage func(context.Context, PaginatedComputeResourcesList) (PaginatedComputeResourcesList, error)) PaginatedComputeResourcesListPage {
+ return PaginatedComputeResourcesListPage{fn: getNextPage}
+}
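+
+// A minimal iteration sketch over a PaginatedComputeResourcesListPage. It assumes a
+// MachineLearningComputeClient list call (for example ListByWorkspace, defined elsewhere in
+// this package) that returns this page type; names are illustrative and error handling is
+// abbreviated:
+//
+//    page, err := computeClient.ListByWorkspace(ctx, "myResourceGroup", "myWorkspace", "")
+//    for err == nil && page.NotDone() {
+//        for _, cr := range page.Values() {
+//            fmt.Println(*cr.ID)
+//        }
+//        err = page.NextWithContext(ctx)
+//    }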
+
+// Password ...
+type Password struct {
+ // Name - READ-ONLY
+ Name *string `json:"name,omitempty"`
+ // Value - READ-ONLY
+ Value *string `json:"value,omitempty"`
+}
+
+// RegistryListCredentialsResult ...
+type RegistryListCredentialsResult struct {
+ // Location - READ-ONLY
+ Location *string `json:"location,omitempty"`
+ // Username - READ-ONLY
+ Username *string `json:"username,omitempty"`
+ Passwords *[]Password `json:"passwords,omitempty"`
+}
+
+// Resource an Azure Resource Manager resource envelope.
+type Resource struct {
+ // ID - READ-ONLY; Specifies the resource ID.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Specifies the name of the resource.
+ Name *string `json:"name,omitempty"`
+ // Identity - READ-ONLY; The identity of the resource.
+ Identity *Identity `json:"identity,omitempty"`
+ // Location - Specifies the location of the resource.
+ Location *string `json:"location,omitempty"`
+ // Type - READ-ONLY; Specifies the type of the resource.
+ Type *string `json:"type,omitempty"`
+ // Tags - Contains resource tags defined as key/value pairs.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for Resource.
+func (r Resource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if r.Location != nil {
+ objectMap["location"] = r.Location
+ }
+ if r.Tags != nil {
+ objectMap["tags"] = r.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// ResourceID represents a resource ID. For example, for a subnet, it is the resource URL for the subnet.
+type ResourceID struct {
+ // ID - The ID of the resource
+ ID *string `json:"id,omitempty"`
+}
+
+// ScaleSettings scale settings for AML Compute
+type ScaleSettings struct {
+ // MaxNodeCount - Max number of nodes to use
+ MaxNodeCount *int32 `json:"maxNodeCount,omitempty"`
+ // MinNodeCount - Min number of nodes to use
+ MinNodeCount *int32 `json:"minNodeCount,omitempty"`
+ // NodeIdleTimeBeforeScaleDown - Node Idle Time before scaling down amlCompute
+ NodeIdleTimeBeforeScaleDown *string `json:"nodeIdleTimeBeforeScaleDown,omitempty"`
+}
+
+// ServicePrincipalCredentials service principal credentials.
+type ServicePrincipalCredentials struct {
+ // ClientID - Client Id
+ ClientID *string `json:"clientId,omitempty"`
+ // ClientSecret - Client secret
+ ClientSecret *string `json:"clientSecret,omitempty"`
+}
+
+// SslConfiguration the SSL configuration for scoring
+type SslConfiguration struct {
+ // Status - Enable or disable SSL for scoring. Possible values include: 'StatusDisabled', 'StatusEnabled'
+ Status Status `json:"status,omitempty"`
+ // Cert - Cert data
+ Cert *string `json:"cert,omitempty"`
+ // Key - Key data
+ Key *string `json:"key,omitempty"`
+ // Cname - CNAME of the cert
+ Cname *string `json:"cname,omitempty"`
+}
+
+// SystemService a system service running on a compute.
+type SystemService struct {
+ // SystemServiceType - READ-ONLY; The type of this system service.
+ SystemServiceType *string `json:"systemServiceType,omitempty"`
+ // PublicIPAddress - READ-ONLY; Public IP address
+ PublicIPAddress *string `json:"publicIpAddress,omitempty"`
+ // Version - READ-ONLY; The version for this type.
+ Version *string `json:"version,omitempty"`
+}
+
+// Usage describes AML Resource Usage.
+type Usage struct {
+ // ID - READ-ONLY; Specifies the resource ID.
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Specifies the resource type.
+ Type *string `json:"type,omitempty"`
+ // Unit - READ-ONLY; An enum describing the unit of usage measurement. Possible values include: 'Count'
+ Unit UsageUnit `json:"unit,omitempty"`
+ // CurrentValue - READ-ONLY; The current usage of the resource.
+ CurrentValue *int64 `json:"currentValue,omitempty"`
+ // Limit - READ-ONLY; The maximum permitted usage of the resource.
+ Limit *int64 `json:"limit,omitempty"`
+ // Name - READ-ONLY; The name of the type of usage.
+ Name *UsageName `json:"name,omitempty"`
+}
+
+// UsageName the Usage Names.
+type UsageName struct {
+ // Value - READ-ONLY; The name of the resource.
+ Value *string `json:"value,omitempty"`
+ // LocalizedValue - READ-ONLY; The localized name of the resource.
+ LocalizedValue *string `json:"localizedValue,omitempty"`
+}
+
+// UserAccountCredentials settings for the user account that gets created on each of the nodes of a compute.
+type UserAccountCredentials struct {
+ // AdminUserName - Name of the administrator user account which can be used to SSH to nodes.
+ AdminUserName *string `json:"adminUserName,omitempty"`
+ // AdminUserSSHPublicKey - SSH public key of the administrator user account.
+ AdminUserSSHPublicKey *string `json:"adminUserSshPublicKey,omitempty"`
+ // AdminUserPassword - Password of the administrator user account.
+ AdminUserPassword *string `json:"adminUserPassword,omitempty"`
+}
+
+// VirtualMachine a Machine Learning compute based on Azure Virtual Machines.
+type VirtualMachine struct {
+ Properties *VirtualMachineProperties `json:"properties,omitempty"`
+ // ComputeLocation - Location for the underlying compute
+ ComputeLocation *string `json:"computeLocation,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and Failed. Possible values include: 'Unknown', 'Updating', 'Creating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Description - The description of the Machine Learning compute.
+ Description *string `json:"description,omitempty"`
+ // CreatedOn - READ-ONLY; The date and time when the compute was created.
+ CreatedOn *date.Time `json:"createdOn,omitempty"`
+ // ModifiedOn - READ-ONLY; The date and time when the compute was last modified.
+ ModifiedOn *date.Time `json:"modifiedOn,omitempty"`
+ // ResourceID - ARM resource id of the underlying compute
+ ResourceID *string `json:"resourceId,omitempty"`
+ // ProvisioningErrors - READ-ONLY; Errors during provisioning
+ ProvisioningErrors *[]Error `json:"provisioningErrors,omitempty"`
+ // IsAttachedCompute - READ-ONLY; Indicates whether the compute was provisioned by the user and brought from outside (true), or provisioned by the Machine Learning service (false).
+ IsAttachedCompute *bool `json:"isAttachedCompute,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeCompute', 'ComputeTypeAKS1', 'ComputeTypeAmlCompute1', 'ComputeTypeVirtualMachine1', 'ComputeTypeHDInsight1', 'ComputeTypeDataFactory1', 'ComputeTypeDatabricks1', 'ComputeTypeDataLakeAnalytics1'
+ ComputeType ComputeTypeBasicCompute `json:"computeType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for VirtualMachine.
+func (VM VirtualMachine) MarshalJSON() ([]byte, error) {
+ VM.ComputeType = ComputeTypeVirtualMachine1
+ objectMap := make(map[string]interface{})
+ if VM.Properties != nil {
+ objectMap["properties"] = VM.Properties
+ }
+ if VM.ComputeLocation != nil {
+ objectMap["computeLocation"] = VM.ComputeLocation
+ }
+ if VM.Description != nil {
+ objectMap["description"] = VM.Description
+ }
+ if VM.ResourceID != nil {
+ objectMap["resourceId"] = VM.ResourceID
+ }
+ if VM.ComputeType != "" {
+ objectMap["computeType"] = VM.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAKS is the BasicCompute implementation for VirtualMachine.
+func (VM VirtualMachine) AsAKS() (*AKS, bool) {
+ return nil, false
+}
+
+// AsAmlCompute is the BasicCompute implementation for VirtualMachine.
+func (VM VirtualMachine) AsAmlCompute() (*AmlCompute, bool) {
+ return nil, false
+}
+
+// AsVirtualMachine is the BasicCompute implementation for VirtualMachine.
+func (VM VirtualMachine) AsVirtualMachine() (*VirtualMachine, bool) {
+ return &VM, true
+}
+
+// AsHDInsight is the BasicCompute implementation for VirtualMachine.
+func (VM VirtualMachine) AsHDInsight() (*HDInsight, bool) {
+ return nil, false
+}
+
+// AsDataFactory is the BasicCompute implementation for VirtualMachine.
+func (VM VirtualMachine) AsDataFactory() (*DataFactory, bool) {
+ return nil, false
+}
+
+// AsDatabricks is the BasicCompute implementation for VirtualMachine.
+func (VM VirtualMachine) AsDatabricks() (*Databricks, bool) {
+ return nil, false
+}
+
+// AsDataLakeAnalytics is the BasicCompute implementation for VirtualMachine.
+func (VM VirtualMachine) AsDataLakeAnalytics() (*DataLakeAnalytics, bool) {
+ return nil, false
+}
+
+// AsCompute is the BasicCompute implementation for VirtualMachine.
+func (VM VirtualMachine) AsCompute() (*Compute, bool) {
+ return nil, false
+}
+
+// AsBasicCompute is the BasicCompute implementation for VirtualMachine.
+func (VM VirtualMachine) AsBasicCompute() (BasicCompute, bool) {
+ return &VM, true
+}
+
+// VirtualMachineProperties ...
+type VirtualMachineProperties struct {
+ // VirtualMachineSize - Virtual Machine size
+ VirtualMachineSize *string `json:"virtualMachineSize,omitempty"`
+ // SSHPort - Port open for ssh connections.
+ SSHPort *int32 `json:"sshPort,omitempty"`
+ // Address - Public IP address of the virtual machine.
+ Address *string `json:"address,omitempty"`
+ // AdministratorAccount - Admin credentials for virtual machine
+ AdministratorAccount *VirtualMachineSSHCredentials `json:"administratorAccount,omitempty"`
+}
+
+// VirtualMachineSecrets secrets related to a Machine Learning compute based on Azure Virtual Machines.
+type VirtualMachineSecrets struct {
+ // AdministratorAccount - Admin credentials for virtual machine.
+ AdministratorAccount *VirtualMachineSSHCredentials `json:"administratorAccount,omitempty"`
+ // ComputeType - Possible values include: 'ComputeTypeBasicComputeSecretsComputeTypeComputeSecrets', 'ComputeTypeBasicComputeSecretsComputeTypeAKS', 'ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine', 'ComputeTypeBasicComputeSecretsComputeTypeDatabricks'
+ ComputeType ComputeTypeBasicComputeSecrets `json:"computeType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for VirtualMachineSecrets.
+func (vms VirtualMachineSecrets) MarshalJSON() ([]byte, error) {
+ vms.ComputeType = ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine
+ objectMap := make(map[string]interface{})
+ if vms.AdministratorAccount != nil {
+ objectMap["administratorAccount"] = vms.AdministratorAccount
+ }
+ if vms.ComputeType != "" {
+ objectMap["computeType"] = vms.ComputeType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAksComputeSecrets is the BasicComputeSecrets implementation for VirtualMachineSecrets.
+func (vms VirtualMachineSecrets) AsAksComputeSecrets() (*AksComputeSecrets, bool) {
+ return nil, false
+}
+
+// AsVirtualMachineSecrets is the BasicComputeSecrets implementation for VirtualMachineSecrets.
+func (vms VirtualMachineSecrets) AsVirtualMachineSecrets() (*VirtualMachineSecrets, bool) {
+ return &vms, true
+}
+
+// AsDatabricksComputeSecrets is the BasicComputeSecrets implementation for VirtualMachineSecrets.
+func (vms VirtualMachineSecrets) AsDatabricksComputeSecrets() (*DatabricksComputeSecrets, bool) {
+ return nil, false
+}
+
+// AsComputeSecrets is the BasicComputeSecrets implementation for VirtualMachineSecrets.
+func (vms VirtualMachineSecrets) AsComputeSecrets() (*ComputeSecrets, bool) {
+ return nil, false
+}
+
+// AsBasicComputeSecrets is the BasicComputeSecrets implementation for VirtualMachineSecrets.
+func (vms VirtualMachineSecrets) AsBasicComputeSecrets() (BasicComputeSecrets, bool) {
+ return &vms, true
+}
+
+// VirtualMachineSize describes the properties of a VM size.
+type VirtualMachineSize struct {
+ // Name - READ-ONLY; The name of the virtual machine size.
+ Name *string `json:"name,omitempty"`
+ // Family - READ-ONLY; The family name of the virtual machine size.
+ Family *string `json:"family,omitempty"`
+ // VCPUs - READ-ONLY; The number of vCPUs supported by the virtual machine size.
+ VCPUs *int32 `json:"vCPUs,omitempty"`
+ // Gpus - READ-ONLY; The number of GPUs supported by the virtual machine size.
+ Gpus *int32 `json:"gpus,omitempty"`
+ // OsVhdSizeMB - READ-ONLY; The OS VHD disk size, in MB, allowed by the virtual machine size.
+ OsVhdSizeMB *int32 `json:"osVhdSizeMB,omitempty"`
+ // MaxResourceVolumeMB - READ-ONLY; The resource volume size, in MB, allowed by the virtual machine size.
+ MaxResourceVolumeMB *int32 `json:"maxResourceVolumeMB,omitempty"`
+ // MemoryGB - READ-ONLY; The amount of memory, in GB, supported by the virtual machine size.
+ MemoryGB *float64 `json:"memoryGB,omitempty"`
+ // LowPriorityCapable - READ-ONLY; Specifies if the virtual machine size supports low priority VMs.
+ LowPriorityCapable *bool `json:"lowPriorityCapable,omitempty"`
+ // PremiumIO - READ-ONLY; Specifies if the virtual machine size supports premium IO.
+ PremiumIO *bool `json:"premiumIO,omitempty"`
+}
+
+// VirtualMachineSizeListResult the List Virtual Machine size operation response.
+type VirtualMachineSizeListResult struct {
+ autorest.Response `json:"-"`
+ // AmlCompute - The list of virtual machine sizes supported by AmlCompute.
+ AmlCompute *[]VirtualMachineSize `json:"amlCompute,omitempty"`
+}
+
+// VirtualMachineSSHCredentials admin credentials for virtual machine
+type VirtualMachineSSHCredentials struct {
+ // Username - Username of admin account
+ Username *string `json:"username,omitempty"`
+ // Password - Password of admin account
+ Password *string `json:"password,omitempty"`
+ // PublicKeyData - Public key data
+ PublicKeyData *string `json:"publicKeyData,omitempty"`
+ // PrivateKeyData - Private key data
+ PrivateKeyData *string `json:"privateKeyData,omitempty"`
+}
+
+// Workspace an object that represents a machine learning workspace.
+type Workspace struct {
+ autorest.Response `json:"-"`
+ // WorkspaceProperties - The properties of the machine learning workspace.
+ *WorkspaceProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Specifies the resource ID.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Specifies the name of the resource.
+ Name *string `json:"name,omitempty"`
+ // Identity - READ-ONLY; The identity of the resource.
+ Identity *Identity `json:"identity,omitempty"`
+ // Location - Specifies the location of the resource.
+ Location *string `json:"location,omitempty"`
+ // Type - READ-ONLY; Specifies the type of the resource.
+ Type *string `json:"type,omitempty"`
+ // Tags - Contains resource tags defined as key/value pairs.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for Workspace.
+func (w Workspace) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if w.WorkspaceProperties != nil {
+ objectMap["properties"] = w.WorkspaceProperties
+ }
+ if w.Location != nil {
+ objectMap["location"] = w.Location
+ }
+ if w.Tags != nil {
+ objectMap["tags"] = w.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Workspace struct.
+func (w *Workspace) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var workspaceProperties WorkspaceProperties
+ err = json.Unmarshal(*v, &workspaceProperties)
+ if err != nil {
+ return err
+ }
+ w.WorkspaceProperties = &workspaceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ w.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ w.Name = &name
+ }
+ case "identity":
+ if v != nil {
+ var identity Identity
+ err = json.Unmarshal(*v, &identity)
+ if err != nil {
+ return err
+ }
+ w.Identity = &identity
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ w.Location = &location
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ w.Type = &typeVar
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ w.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
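+
+// Because WorkspaceProperties is embedded by pointer and flattened into the "properties" JSON
+// object by the marshaler and unmarshaler above, callers populate it directly on the Workspace.
+// A small sketch from a caller's perspective; the ARM IDs are placeholders and to.StringPtr is
+// from github.com/Azure/go-autorest/autorest/to:
+//
+//    ws := machinelearningservices.Workspace{
+//        Location: to.StringPtr("eastus"),
+//        WorkspaceProperties: &machinelearningservices.WorkspaceProperties{
+//            FriendlyName:        to.StringPtr("example"),
+//            KeyVault:            to.StringPtr("<key vault ARM ID>"),
+//            ApplicationInsights: to.StringPtr("<application insights ARM ID>"),
+//            StorageAccount:      to.StringPtr("<storage account ARM ID>"),
+//        },
+//    }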
+
+// WorkspaceListResult the result of a request to list machine learning workspaces.
+type WorkspaceListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of machine learning workspaces. Since this list may be incomplete, the nextLink field should be used to request the next list of machine learning workspaces.
+ Value *[]Workspace `json:"value,omitempty"`
+ // NextLink - The URI that can be used to request the next list of machine learning workspaces.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// WorkspaceListResultIterator provides access to a complete listing of Workspace values.
+type WorkspaceListResultIterator struct {
+ i int
+ page WorkspaceListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *WorkspaceListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *WorkspaceListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter WorkspaceListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter WorkspaceListResultIterator) Response() WorkspaceListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter WorkspaceListResultIterator) Value() Workspace {
+ if !iter.page.NotDone() {
+ return Workspace{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewWorkspaceListResultIterator creates a new instance of the WorkspaceListResultIterator type.
+func NewWorkspaceListResultIterator(page WorkspaceListResultPage) WorkspaceListResultIterator {
+ return WorkspaceListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (wlr WorkspaceListResult) IsEmpty() bool {
+ return wlr.Value == nil || len(*wlr.Value) == 0
+}
+
+// workspaceListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (wlr WorkspaceListResult) workspaceListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if wlr.NextLink == nil || len(to.String(wlr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(wlr.NextLink)))
+}
+
+// WorkspaceListResultPage contains a page of Workspace values.
+type WorkspaceListResultPage struct {
+ fn func(context.Context, WorkspaceListResult) (WorkspaceListResult, error)
+ wlr WorkspaceListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *WorkspaceListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspaceListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.wlr)
+ if err != nil {
+ return err
+ }
+ page.wlr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *WorkspaceListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page WorkspaceListResultPage) NotDone() bool {
+ return !page.wlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page WorkspaceListResultPage) Response() WorkspaceListResult {
+ return page.wlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page WorkspaceListResultPage) Values() []Workspace {
+ if page.wlr.IsEmpty() {
+ return nil
+ }
+ return *page.wlr.Value
+}
+
+// NewWorkspaceListResultPage creates a new instance of the WorkspaceListResultPage type.
+func NewWorkspaceListResultPage(getNextPage func(context.Context, WorkspaceListResult) (WorkspaceListResult, error)) WorkspaceListResultPage {
+ return WorkspaceListResultPage{fn: getNextPage}
+}
+
+// WorkspaceProperties the properties of a machine learning workspace.
+type WorkspaceProperties struct {
+ // WorkspaceID - READ-ONLY; The immutable id associated with this workspace.
+ WorkspaceID *string `json:"workspaceId,omitempty"`
+ // Description - The description of this workspace.
+ Description *string `json:"description,omitempty"`
+ // FriendlyName - The friendly name for this workspace. This name is mutable.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // CreationTime - READ-ONLY; The creation time of the machine learning workspace in ISO8601 format.
+ CreationTime *date.Time `json:"creationTime,omitempty"`
+ // KeyVault - ARM id of the key vault associated with this workspace. This cannot be changed once the workspace has been created
+ KeyVault *string `json:"keyVault,omitempty"`
+ // ApplicationInsights - ARM id of the application insights associated with this workspace. This cannot be changed once the workspace has been created
+ ApplicationInsights *string `json:"applicationInsights,omitempty"`
+ // ContainerRegistry - ARM id of the container registry associated with this workspace. This cannot be changed once the workspace has been created
+ ContainerRegistry *string `json:"containerRegistry,omitempty"`
+ // StorageAccount - ARM id of the storage account associated with this workspace. This cannot be changed once the workspace has been created
+ StorageAccount *string `json:"storageAccount,omitempty"`
+ // DiscoveryURL - URL for the discovery service to identify regional endpoints for machine learning experimentation services
+ DiscoveryURL *string `json:"discoveryUrl,omitempty"`
+ // ProvisioningState - READ-ONLY; The current deployment state of the workspace resource. The provisioningState indicates the state of resource provisioning. Possible values include: 'Unknown', 'Updating', 'Creating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+}
+
+// WorkspacePropertiesUpdateParameters the parameters for updating the properties of a machine learning
+// workspace.
+type WorkspacePropertiesUpdateParameters struct {
+ // Description - The description of this workspace.
+ Description *string `json:"description,omitempty"`
+ // FriendlyName - The friendly name for this workspace.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+}
+
+// WorkspaceUpdateParameters the parameters for updating a machine learning workspace.
+type WorkspaceUpdateParameters struct {
+ // Tags - The resource tags for the machine learning workspace.
+ Tags map[string]*string `json:"tags"`
+ // WorkspacePropertiesUpdateParameters - The properties that the machine learning workspace will be updated with.
+ *WorkspacePropertiesUpdateParameters `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for WorkspaceUpdateParameters.
+func (wup WorkspaceUpdateParameters) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if wup.Tags != nil {
+ objectMap["tags"] = wup.Tags
+ }
+ if wup.WorkspacePropertiesUpdateParameters != nil {
+ objectMap["properties"] = wup.WorkspacePropertiesUpdateParameters
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for WorkspaceUpdateParameters struct.
+func (wup *WorkspaceUpdateParameters) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ wup.Tags = tags
+ }
+ case "properties":
+ if v != nil {
+ var workspacePropertiesUpdateParameters WorkspacePropertiesUpdateParameters
+ err = json.Unmarshal(*v, &workspacePropertiesUpdateParameters)
+ if err != nil {
+ return err
+ }
+ wup.WorkspacePropertiesUpdateParameters = &workspacePropertiesUpdateParameters
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/operations.go b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/operations.go
new file mode 100644
index 000000000000..e897b167eadb
--- /dev/null
+++ b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/operations.go
@@ -0,0 +1,109 @@
+package machinelearningservices
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// OperationsClient provides the APIs that allow end users to operate on Azure Machine Learning Workspace resources.
+type OperationsClient struct {
+ BaseClient
+}
+
+// NewOperationsClient creates an instance of the OperationsClient client.
+func NewOperationsClient(subscriptionID string) OperationsClient {
+ return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+ return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists all of the available Azure Machine Learning Workspaces REST API operations.
+func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.OperationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.OperationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.OperationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.MachineLearningServices/operations"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
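+
+// A minimal call sketch for this client. It assumes ctx is a context.Context and authorizer an
+// autorest.Authorizer supplied by the caller (for example via autorest/azure/auth); the
+// subscription ID is illustrative and error handling is abbreviated:
+//
+//    opsClient := machinelearningservices.NewOperationsClient("<subscription ID>")
+//    opsClient.Authorizer = authorizer
+//    res, err := opsClient.List(ctx)
+//    if err == nil && res.Value != nil {
+//        for _, op := range *res.Value {
+//            fmt.Println(*op.Name)
+//        }
+//    }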
diff --git a/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/usages.go b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/usages.go
new file mode 100644
index 000000000000..95c27149def3
--- /dev/null
+++ b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/usages.go
@@ -0,0 +1,161 @@
+package machinelearningservices
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// UsagesClient provides the APIs that allow end users to operate on Azure Machine Learning Workspace resources.
+type UsagesClient struct {
+ BaseClient
+}
+
+// NewUsagesClient creates an instance of the UsagesClient client.
+func NewUsagesClient(subscriptionID string) UsagesClient {
+ return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client.
+func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient {
+ return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List gets the current usage information as well as limits for AML resources for the given subscription and location.
+// Parameters:
+// location - the location for which resource usage is queried.
+func (client UsagesClient) List(ctx context.Context, location string) (result ListUsagesResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/UsagesClient.List")
+ defer func() {
+ sc := -1
+ if result.lur.Response.Response != nil {
+ sc = result.lur.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: location,
+ Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("machinelearningservices.UsagesClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, location)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.UsagesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.lur.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.UsagesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.lur, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.UsagesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client UsagesClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "location": autorest.Encode("path", location),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/usages", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client UsagesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client UsagesClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client UsagesClient) listNextResults(ctx context.Context, lastResults ListUsagesResult) (result ListUsagesResult, err error) {
+ req, err := lastResults.listUsagesResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "machinelearningservices.UsagesClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "machinelearningservices.UsagesClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.UsagesClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client UsagesClient) ListComplete(ctx context.Context, location string) (result ListUsagesResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/UsagesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, location)
+ return
+}
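+
+// A minimal iteration sketch using the iterator returned by ListComplete; the subscription ID,
+// location, and authorizer are illustrative and error handling is abbreviated:
+//
+//    usagesClient := machinelearningservices.NewUsagesClient("<subscription ID>")
+//    usagesClient.Authorizer = authorizer
+//    iter, err := usagesClient.ListComplete(ctx, "eastus")
+//    for err == nil && iter.NotDone() {
+//        u := iter.Value()
+//        fmt.Printf("%s: %d of %d\n", *u.Name.Value, *u.CurrentValue, *u.Limit)
+//        err = iter.NextWithContext(ctx)
+//    }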
diff --git a/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/version.go b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/version.go
new file mode 100644
index 000000000000..5ee4cafe92aa
--- /dev/null
+++ b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/version.go
@@ -0,0 +1,30 @@
+package machinelearningservices
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " machinelearningservices/2019-06-01"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
diff --git a/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/virtualmachinesizes.go b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/virtualmachinesizes.go
new file mode 100644
index 000000000000..e1bab4877e29
--- /dev/null
+++ b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/virtualmachinesizes.go
@@ -0,0 +1,124 @@
+package machinelearningservices
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// VirtualMachineSizesClient provides the APIs that allow end users to operate on Azure Machine Learning
+// Workspace resources.
+type VirtualMachineSizesClient struct {
+ BaseClient
+}
+
+// NewVirtualMachineSizesClient creates an instance of the VirtualMachineSizesClient client.
+func NewVirtualMachineSizesClient(subscriptionID string) VirtualMachineSizesClient {
+ return NewVirtualMachineSizesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewVirtualMachineSizesClientWithBaseURI creates an instance of the VirtualMachineSizesClient client.
+func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineSizesClient {
+ return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List returns the supported VM sizes in a location.
+// Parameters:
+// location - the location for which virtual machine sizes are queried.
+func (client VirtualMachineSizesClient) List(ctx context.Context, location string) (result VirtualMachineSizeListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineSizesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: location,
+ Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("machinelearningservices.VirtualMachineSizesClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, location)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.VirtualMachineSizesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.VirtualMachineSizesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.VirtualMachineSizesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client VirtualMachineSizesClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "location": autorest.Encode("path", location),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/vmSizes", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineSizesClient) ListResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
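+
+// A minimal call sketch for this client; the subscription ID, location, and authorizer are
+// illustrative and error handling is abbreviated:
+//
+//    vmSizesClient := machinelearningservices.NewVirtualMachineSizesClient("<subscription ID>")
+//    vmSizesClient.Authorizer = authorizer
+//    sizes, err := vmSizesClient.List(ctx, "eastus")
+//    if err == nil && sizes.AmlCompute != nil {
+//        for _, s := range *sizes.AmlCompute {
+//            fmt.Printf("%s: %d vCPUs, %.1f GB\n", *s.Name, *s.VCPUs, *s.MemoryGB)
+//        }
+//    }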
diff --git a/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/workspaces.go b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/workspaces.go
new file mode 100644
index 000000000000..6b4a947da202
--- /dev/null
+++ b/services/machinelearningservices/mgmt/2019-06-01/machinelearningservices/workspaces.go
@@ -0,0 +1,741 @@
+package machinelearningservices
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// WorkspacesClient provides the APIs that allow end users to operate on Azure Machine Learning Workspace resources.
+type WorkspacesClient struct {
+ BaseClient
+}
+
+// NewWorkspacesClient creates an instance of the WorkspacesClient client.
+func NewWorkspacesClient(subscriptionID string) WorkspacesClient {
+ return NewWorkspacesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWorkspacesClientWithBaseURI creates an instance of the WorkspacesClient client.
+func NewWorkspacesClientWithBaseURI(baseURI string, subscriptionID string) WorkspacesClient {
+ return WorkspacesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a workspace with the specified parameters.
+// Parameters:
+// resourceGroupName - name of the resource group in which the workspace is located.
+// workspaceName - name of Azure Machine Learning workspace.
+// parameters - the parameters for creating or updating a machine learning workspace.
+func (client WorkspacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, parameters Workspace) (result Workspace, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, workspaceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client WorkspacesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, parameters Workspace) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkspacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client WorkspacesClient) CreateOrUpdateResponder(resp *http.Response) (result Workspace, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
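+// ensureWorkspace is an illustrative sketch (reviewer note, not part of the generated
+// surface) of a minimal CreateOrUpdate call; it assumes the usual tracked-resource shape of
+// Workspace (a Location field), and real callers will typically also set the workspace
+// properties such as the backing storage account, key vault and Application Insights resource.
+func ensureWorkspace(ctx context.Context, client WorkspacesClient, resourceGroup, name, location string) (Workspace, error) {
+	params := Workspace{Location: &location}
+	// CreateOrUpdate is synchronous in this API version and returns the resulting workspace.
+	return client.CreateOrUpdate(ctx, resourceGroup, name, params)
+}
+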
+// Delete deletes a machine learning workspace.
+// Parameters:
+// resourceGroupName - name of the resource group in which the workspace is located.
+// workspaceName - name of the Azure Machine Learning workspace.
+func (client WorkspacesClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, workspaceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client WorkspacesClient) DeletePreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkspacesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client WorkspacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the properties of the specified machine learning workspace.
+// Parameters:
+// resourceGroupName - name of the resource group in which the workspace is located.
+// workspaceName - name of the Azure Machine Learning workspace.
+func (client WorkspacesClient) Get(ctx context.Context, resourceGroupName string, workspaceName string) (result Workspace, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client WorkspacesClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkspacesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client WorkspacesClient) GetResponder(resp *http.Response) (result Workspace, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByResourceGroup lists all the available machine learning workspaces under the specified resource group.
+// Parameters:
+// resourceGroupName - name of the resource group in which the workspace is located.
+// skiptoken - continuation token for pagination.
+func (client WorkspacesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, skiptoken string) (result WorkspaceListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.wlr.Response.Response != nil {
+ sc = result.wlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByResourceGroupNextResults
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, skiptoken)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.wlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.wlr, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client WorkspacesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, skiptoken string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(skiptoken) > 0 {
+ queryParameters["$skiptoken"] = autorest.Encode("query", skiptoken)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkspacesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client WorkspacesClient) ListByResourceGroupResponder(resp *http.Response) (result WorkspaceListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client WorkspacesClient) listByResourceGroupNextResults(ctx context.Context, lastResults WorkspaceListResult) (result WorkspaceListResult, err error) {
+ req, err := lastResults.workspaceListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client WorkspacesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, skiptoken string) (result WorkspaceListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, skiptoken)
+ return
+}
+
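+// listWorkspaceNames is an illustrative sketch (reviewer note, not part of the generated
+// surface): it drains the iterator returned by ListByResourceGroupComplete, which follows
+// nextLink pages transparently, and collects the workspace names. An empty skiptoken starts
+// from the first page.
+func listWorkspaceNames(ctx context.Context, client WorkspacesClient, resourceGroup string) ([]string, error) {
+	iter, err := client.ListByResourceGroupComplete(ctx, resourceGroup, "")
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for iter.NotDone() {
+		if ws := iter.Value(); ws.Name != nil {
+			names = append(names, *ws.Name)
+		}
+		// NextWithContext fetches the next page when the current one is exhausted.
+		if err := iter.NextWithContext(ctx); err != nil {
+			return names, err
+		}
+	}
+	return names, nil
+}
+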
+// ListBySubscription lists all the available machine learning workspaces under the specified subscription.
+// Parameters:
+// skiptoken - continuation token for pagination.
+func (client WorkspacesClient) ListBySubscription(ctx context.Context, skiptoken string) (result WorkspaceListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.wlr.Response.Response != nil {
+ sc = result.wlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listBySubscriptionNextResults
+ req, err := client.ListBySubscriptionPreparer(ctx, skiptoken)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ListBySubscription", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.wlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ListBySubscription", resp, "Failure sending request")
+ return
+ }
+
+ result.wlr, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ListBySubscription", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListBySubscriptionPreparer prepares the ListBySubscription request.
+func (client WorkspacesClient) ListBySubscriptionPreparer(ctx context.Context, skiptoken string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(skiptoken) > 0 {
+ queryParameters["$skiptoken"] = autorest.Encode("query", skiptoken)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/workspaces", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkspacesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
+// closes the http.Response Body.
+func (client WorkspacesClient) ListBySubscriptionResponder(resp *http.Response) (result WorkspaceListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listBySubscriptionNextResults retrieves the next set of results, if any.
+func (client WorkspacesClient) listBySubscriptionNextResults(ctx context.Context, lastResults WorkspaceListResult) (result WorkspaceListResult, err error) {
+ req, err := lastResults.workspaceListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
+func (client WorkspacesClient) ListBySubscriptionComplete(ctx context.Context, skiptoken string) (result WorkspaceListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListBySubscription(ctx, skiptoken)
+ return
+}
+
+// ListKeys lists all the keys associated with this workspace. This includes keys for the storage account,
+// Application Insights, and the password for the container registry.
+// Parameters:
+// resourceGroupName - name of the resource group in which the workspace is located.
+// workspaceName - name of the Azure Machine Learning workspace.
+func (client WorkspacesClient) ListKeys(ctx context.Context, resourceGroupName string, workspaceName string) (result ListWorkspaceKeysResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.ListKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListKeysPreparer(ctx, resourceGroupName, workspaceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ListKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListKeysSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ListKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ListKeys", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListKeysPreparer prepares the ListKeys request.
+func (client WorkspacesClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/listKeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListKeysSender sends the ListKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkspacesClient) ListKeysSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListKeysResponder handles the response to the ListKeys request. The method always
+// closes the http.Response Body.
+func (client WorkspacesClient) ListKeysResponder(resp *http.Response) (result ListWorkspaceKeysResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ResyncKeys resyncs all the keys associated with this workspace. This includes keys for the storage account,
+// Application Insights, and the password for the container registry.
+// Parameters:
+// resourceGroupName - name of the resource group in which the workspace is located.
+// workspaceName - name of the Azure Machine Learning workspace.
+func (client WorkspacesClient) ResyncKeys(ctx context.Context, resourceGroupName string, workspaceName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.ResyncKeys")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ResyncKeysPreparer(ctx, resourceGroupName, workspaceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ResyncKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ResyncKeysSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ResyncKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ResyncKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "ResyncKeys", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ResyncKeysPreparer prepares the ResyncKeys request.
+func (client WorkspacesClient) ResyncKeysPreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/resyncKeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ResyncKeysSender sends the ResyncKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkspacesClient) ResyncKeysSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ResyncKeysResponder handles the response to the ResyncKeys request. The method always
+// closes the http.Response Body.
+func (client WorkspacesClient) ResyncKeysResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Update updates a machine learning workspace with the specified parameters.
+// Parameters:
+// resourceGroupName - name of the resource group in which the workspace is located.
+// workspaceName - name of the Azure Machine Learning workspace.
+// parameters - the parameters for updating a machine learning workspace.
+func (client WorkspacesClient) Update(ctx context.Context, resourceGroupName string, workspaceName string, parameters WorkspaceUpdateParameters) (result Workspace, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkspacesClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, workspaceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "machinelearningservices.WorkspacesClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client WorkspacesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, workspaceName string, parameters WorkspaceUpdateParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkspacesClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client WorkspacesClient) UpdateResponder(resp *http.Response) (result Workspace, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
diff --git a/services/network/mgmt/2018-07-01/network/models.go b/services/network/mgmt/2018-07-01/network/models.go
index 7061c8d8a094..4280b942f451 100644
--- a/services/network/mgmt/2018-07-01/network/models.go
+++ b/services/network/mgmt/2018-07-01/network/models.go
@@ -15001,6 +15001,8 @@ type PublicIPPrefixPropertiesFormat struct {
IPPrefix *string `json:"ipPrefix,omitempty"`
// PublicIPAddresses - The list of all referenced PublicIPAddresses
PublicIPAddresses *[]ReferencedPublicIPAddress `json:"publicIPAddresses,omitempty"`
+ // LoadBalancerFrontendIPConfiguration - READ-ONLY; The reference to load balancer frontend IP configuration associated with the public IP prefix.
+ LoadBalancerFrontendIPConfiguration *SubResource `json:"loadBalancerFrontendIpConfiguration,omitempty"`
// ResourceGUID - The resource GUID property of the public IP prefix resource.
ResourceGUID *string `json:"resourceGuid,omitempty"`
// ProvisioningState - The provisioning state of the Public IP prefix resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
diff --git a/services/network/mgmt/2018-08-01/network/models.go b/services/network/mgmt/2018-08-01/network/models.go
index 3e5fd091d679..93528ce26775 100644
--- a/services/network/mgmt/2018-08-01/network/models.go
+++ b/services/network/mgmt/2018-08-01/network/models.go
@@ -19684,6 +19684,8 @@ type PublicIPPrefixPropertiesFormat struct {
IPPrefix *string `json:"ipPrefix,omitempty"`
// PublicIPAddresses - The list of all referenced PublicIPAddresses
PublicIPAddresses *[]ReferencedPublicIPAddress `json:"publicIPAddresses,omitempty"`
+ // LoadBalancerFrontendIPConfiguration - READ-ONLY; The reference to load balancer frontend IP configuration associated with the public IP prefix.
+ LoadBalancerFrontendIPConfiguration *SubResource `json:"loadBalancerFrontendIpConfiguration,omitempty"`
// ResourceGUID - The resource GUID property of the public IP prefix resource.
ResourceGUID *string `json:"resourceGuid,omitempty"`
// ProvisioningState - The provisioning state of the Public IP prefix resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
diff --git a/services/network/mgmt/2018-10-01/network/models.go b/services/network/mgmt/2018-10-01/network/models.go
index c45a1898c520..f23b3e99fba5 100644
--- a/services/network/mgmt/2018-10-01/network/models.go
+++ b/services/network/mgmt/2018-10-01/network/models.go
@@ -20039,6 +20039,8 @@ type PublicIPPrefixPropertiesFormat struct {
IPPrefix *string `json:"ipPrefix,omitempty"`
// PublicIPAddresses - The list of all referenced PublicIPAddresses
PublicIPAddresses *[]ReferencedPublicIPAddress `json:"publicIPAddresses,omitempty"`
+ // LoadBalancerFrontendIPConfiguration - READ-ONLY; The reference to load balancer frontend IP configuration associated with the public IP prefix.
+ LoadBalancerFrontendIPConfiguration *SubResource `json:"loadBalancerFrontendIpConfiguration,omitempty"`
// ResourceGUID - The resource GUID property of the public IP prefix resource.
ResourceGUID *string `json:"resourceGuid,omitempty"`
// ProvisioningState - The provisioning state of the Public IP prefix resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
diff --git a/services/network/mgmt/2018-11-01/network/models.go b/services/network/mgmt/2018-11-01/network/models.go
index 4016068030d0..7694128c6daa 100644
--- a/services/network/mgmt/2018-11-01/network/models.go
+++ b/services/network/mgmt/2018-11-01/network/models.go
@@ -20405,6 +20405,8 @@ type PublicIPPrefixPropertiesFormat struct {
IPPrefix *string `json:"ipPrefix,omitempty"`
// PublicIPAddresses - The list of all referenced PublicIPAddresses
PublicIPAddresses *[]ReferencedPublicIPAddress `json:"publicIPAddresses,omitempty"`
+ // LoadBalancerFrontendIPConfiguration - READ-ONLY; The reference to load balancer frontend IP configuration associated with the public IP prefix.
+ LoadBalancerFrontendIPConfiguration *SubResource `json:"loadBalancerFrontendIpConfiguration,omitempty"`
// ResourceGUID - The resource GUID property of the public IP prefix resource.
ResourceGUID *string `json:"resourceGuid,omitempty"`
// ProvisioningState - The provisioning state of the Public IP prefix resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
diff --git a/services/network/mgmt/2018-12-01/network/models.go b/services/network/mgmt/2018-12-01/network/models.go
index faf91b63e765..d5ffe92ada2c 100644
--- a/services/network/mgmt/2018-12-01/network/models.go
+++ b/services/network/mgmt/2018-12-01/network/models.go
@@ -20947,6 +20947,8 @@ type PublicIPPrefixPropertiesFormat struct {
IPPrefix *string `json:"ipPrefix,omitempty"`
// PublicIPAddresses - The list of all referenced PublicIPAddresses
PublicIPAddresses *[]ReferencedPublicIPAddress `json:"publicIPAddresses,omitempty"`
+ // LoadBalancerFrontendIPConfiguration - READ-ONLY; The reference to load balancer frontend IP configuration associated with the public IP prefix.
+ LoadBalancerFrontendIPConfiguration *SubResource `json:"loadBalancerFrontendIpConfiguration,omitempty"`
// ResourceGUID - The resource GUID property of the public IP prefix resource.
ResourceGUID *string `json:"resourceGuid,omitempty"`
// ProvisioningState - The provisioning state of the Public IP prefix resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
diff --git a/services/network/mgmt/2019-02-01/network/models.go b/services/network/mgmt/2019-02-01/network/models.go
index 67ef514fc2d2..8c789a21518e 100644
--- a/services/network/mgmt/2019-02-01/network/models.go
+++ b/services/network/mgmt/2019-02-01/network/models.go
@@ -3595,10 +3595,10 @@ type ApplicationGatewayOnDemandProbe struct {
PickHostNameFromBackendHTTPSettings *bool `json:"pickHostNameFromBackendHttpSettings,omitempty"`
// Match - Criterion for classifying a healthy probe response.
Match *ApplicationGatewayProbeHealthResponseMatch `json:"match,omitempty"`
- // BackendPoolName - Name of backend pool of application gateway to which probe request will be sent.
- BackendPoolName *string `json:"backendPoolName,omitempty"`
- // BackendHTTPSettingName - Name of backend http setting of application gateway to be used for test probe
- BackendHTTPSettingName *string `json:"backendHttpSettingName,omitempty"`
+	// BackendAddressPool - Reference to the backend pool of the application gateway to which the probe request will be sent.
+	BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"`
+	// BackendHTTPSettings - Reference to the backend HTTP settings of the application gateway to be used for the test probe.
+	BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"`
}
// ApplicationGatewayPathRule path rule of URL path map of an application gateway.
@@ -21451,17 +21451,19 @@ type PublicIPPrefixPropertiesFormat struct {
IPTags *[]IPTag `json:"ipTags,omitempty"`
// PrefixLength - The Length of the Public IP Prefix.
PrefixLength *int32 `json:"prefixLength,omitempty"`
- // IPPrefix - The allocated Prefix
+ // IPPrefix - The allocated Prefix.
IPPrefix *string `json:"ipPrefix,omitempty"`
- // PublicIPAddresses - The list of all referenced PublicIPAddresses
+ // PublicIPAddresses - The list of all referenced PublicIPAddresses.
PublicIPAddresses *[]ReferencedPublicIPAddress `json:"publicIPAddresses,omitempty"`
+ // LoadBalancerFrontendIPConfiguration - READ-ONLY; The reference to load balancer frontend IP configuration associated with the public IP prefix.
+ LoadBalancerFrontendIPConfiguration *SubResource `json:"loadBalancerFrontendIpConfiguration,omitempty"`
// ResourceGUID - The resource GUID property of the public IP prefix resource.
ResourceGUID *string `json:"resourceGuid,omitempty"`
// ProvisioningState - The provisioning state of the Public IP prefix resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
ProvisioningState *string `json:"provisioningState,omitempty"`
}
-// PublicIPPrefixSku SKU of a public IP prefix
+// PublicIPPrefixSku SKU of a public IP prefix.
type PublicIPPrefixSku struct {
// Name - Name of a public IP prefix SKU. Possible values include: 'PublicIPPrefixSkuNameStandard'
Name PublicIPPrefixSkuName `json:"name,omitempty"`
@@ -21475,7 +21477,7 @@ type QueryTroubleshootingParameters struct {
// ReferencedPublicIPAddress reference to a public IP address.
type ReferencedPublicIPAddress struct {
- // ID - The PublicIPAddress Reference
+ // ID - The PublicIPAddress Reference.
ID *string `json:"id,omitempty"`
}
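+
+// Reviewer note, illustrative sketch only (not part of the generated package): the breaking
+// change above replaces the backend pool / HTTP-setting *names* on the on-demand probe with
+// SubResource references (ARM resource IDs). A minimal migration, assuming the caller already
+// knows the two resource IDs, looks like this.
+func newOnDemandProbe(backendPoolID, backendHTTPSettingsID string) ApplicationGatewayOnDemandProbe {
+	return ApplicationGatewayOnDemandProbe{
+		// Previously: BackendPoolName / BackendHTTPSettingName (plain name strings).
+		BackendAddressPool:  &SubResource{ID: &backendPoolID},
+		BackendHTTPSettings: &SubResource{ID: &backendHTTPSettingsID},
+	}
+}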
diff --git a/services/network/mgmt/2019-04-01/network/models.go b/services/network/mgmt/2019-04-01/network/models.go
index e9a9ea79dc72..042b85a49b10 100644
--- a/services/network/mgmt/2019-04-01/network/models.go
+++ b/services/network/mgmt/2019-04-01/network/models.go
@@ -3595,10 +3595,10 @@ type ApplicationGatewayOnDemandProbe struct {
PickHostNameFromBackendHTTPSettings *bool `json:"pickHostNameFromBackendHttpSettings,omitempty"`
// Match - Criterion for classifying a healthy probe response.
Match *ApplicationGatewayProbeHealthResponseMatch `json:"match,omitempty"`
- // BackendPoolName - Name of backend pool of application gateway to which probe request will be sent.
- BackendPoolName *string `json:"backendPoolName,omitempty"`
- // BackendHTTPSettingName - Name of backend http setting of application gateway to be used for test probe.
- BackendHTTPSettingName *string `json:"backendHttpSettingName,omitempty"`
+	// BackendAddressPool - Reference to the backend pool of the application gateway to which the probe request will be sent.
+	BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"`
+	// BackendHTTPSettings - Reference to the backend HTTP settings of the application gateway to be used for the test probe.
+	BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"`
}
// ApplicationGatewayPathRule path rule of URL path map of an application gateway.
@@ -22911,6 +22911,8 @@ type PublicIPPrefixPropertiesFormat struct {
IPPrefix *string `json:"ipPrefix,omitempty"`
// PublicIPAddresses - The list of all referenced PublicIPAddresses.
PublicIPAddresses *[]ReferencedPublicIPAddress `json:"publicIPAddresses,omitempty"`
+ // LoadBalancerFrontendIPConfiguration - READ-ONLY; The reference to load balancer frontend IP configuration associated with the public IP prefix.
+ LoadBalancerFrontendIPConfiguration *SubResource `json:"loadBalancerFrontendIpConfiguration,omitempty"`
// ResourceGUID - The resource GUID property of the public IP prefix resource.
ResourceGUID *string `json:"resourceGuid,omitempty"`
// ProvisioningState - The provisioning state of the Public IP prefix resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
diff --git a/services/network/mgmt/2019-04-01/network/servicetags.go b/services/network/mgmt/2019-04-01/network/servicetags.go
index 879ccc5535ea..13f39ba07e90 100644
--- a/services/network/mgmt/2019-04-01/network/servicetags.go
+++ b/services/network/mgmt/2019-04-01/network/servicetags.go
@@ -42,7 +42,9 @@ func NewServiceTagsClientWithBaseURI(baseURI string, subscriptionID string) Serv
// List gets a list of service tag information resources.
// Parameters:
-// location - the location.
+// location - the location that will be used as a reference for version (not as a filter based on location; you
+// will get the list of service tags, with prefix details, across all regions, but limited to the cloud that your
+// subscription belongs to).
func (client ServiceTagsClient) List(ctx context.Context, location string) (result ServiceTagsListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceTagsClient.List")
diff --git a/services/network/mgmt/2019-06-01/network/firewallpolicies.go b/services/network/mgmt/2019-06-01/network/firewallpolicies.go
new file mode 100644
index 000000000000..a15914a15439
--- /dev/null
+++ b/services/network/mgmt/2019-06-01/network/firewallpolicies.go
@@ -0,0 +1,582 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// FirewallPoliciesClient is the client for the Firewall Policy operations of the Network service.
+type FirewallPoliciesClient struct {
+ BaseClient
+}
+
+// NewFirewallPoliciesClient creates an instance of the FirewallPoliciesClient client.
+func NewFirewallPoliciesClient(subscriptionID string) FirewallPoliciesClient {
+ return NewFirewallPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewFirewallPoliciesClientWithBaseURI creates an instance of the FirewallPoliciesClient client.
+func NewFirewallPoliciesClientWithBaseURI(baseURI string, subscriptionID string) FirewallPoliciesClient {
+ return FirewallPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates the specified Firewall Policy.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// firewallPolicyName - the name of the Firewall Policy.
+// parameters - parameters supplied to the create or update Firewall Policy operation.
+func (client FirewallPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters FirewallPolicy) (result FirewallPoliciesCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, firewallPolicyName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client FirewallPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters FirewallPolicy) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "firewallPolicyName": autorest.Encode("path", firewallPolicyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ parameters.Etag = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client FirewallPoliciesClient) CreateOrUpdateSender(req *http.Request) (future FirewallPoliciesCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client FirewallPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result FirewallPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
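+// ensureFirewallPolicy is an illustrative sketch (reviewer note, not part of the generated
+// surface): CreateOrUpdate is a long-running operation, so the returned future must be polled
+// to completion before the resulting FirewallPolicy can be read. Location is assumed here to
+// be the only required tracked-resource field for a bare policy.
+func ensureFirewallPolicy(ctx context.Context, client FirewallPoliciesClient, resourceGroup, name, location string) (FirewallPolicy, error) {
+	future, err := client.CreateOrUpdate(ctx, resourceGroup, name, FirewallPolicy{Location: &location})
+	if err != nil {
+		return FirewallPolicy{}, err
+	}
+	// Block until the service reports the operation as finished (or ctx is cancelled).
+	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+		return FirewallPolicy{}, err
+	}
+	return future.Result(client)
+}
+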
+// Delete deletes the specified Firewall Policy.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// firewallPolicyName - the name of the Firewall Policy.
+func (client FirewallPoliciesClient) Delete(ctx context.Context, resourceGroupName string, firewallPolicyName string) (result FirewallPoliciesDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, firewallPolicyName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client FirewallPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "firewallPolicyName": autorest.Encode("path", firewallPolicyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client FirewallPoliciesClient) DeleteSender(req *http.Request) (future FirewallPoliciesDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client FirewallPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the specified Firewall Policy.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// firewallPolicyName - the name of the Firewall Policy.
+// expand - expands referenced resources.
+func (client FirewallPoliciesClient) Get(ctx context.Context, resourceGroupName string, firewallPolicyName string, expand string) (result FirewallPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, firewallPolicyName, expand)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client FirewallPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, expand string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "firewallPolicyName": autorest.Encode("path", firewallPolicyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client FirewallPoliciesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client FirewallPoliciesClient) GetResponder(resp *http.Response) (result FirewallPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all Firewall Policies in a resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+func (client FirewallPoliciesClient) List(ctx context.Context, resourceGroupName string) (result FirewallPolicyListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.List")
+ defer func() {
+ sc := -1
+ if result.fplr.Response.Response != nil {
+ sc = result.fplr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.fplr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.fplr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client FirewallPoliciesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client FirewallPoliciesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client FirewallPoliciesClient) ListResponder(resp *http.Response) (result FirewallPolicyListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client FirewallPoliciesClient) listNextResults(ctx context.Context, lastResults FirewallPolicyListResult) (result FirewallPolicyListResult, err error) {
+ req, err := lastResults.firewallPolicyListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client FirewallPoliciesClient) ListComplete(ctx context.Context, resourceGroupName string) (result FirewallPolicyListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName)
+ return
+}
+
+// ListAll gets all the Firewall Policies in a subscription.
+func (client FirewallPoliciesClient) ListAll(ctx context.Context) (result FirewallPolicyListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.ListAll")
+ defer func() {
+ sc := -1
+ if result.fplr.Response.Response != nil {
+ sc = result.fplr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listAllNextResults
+ req, err := client.ListAllPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "ListAll", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListAllSender(req)
+ if err != nil {
+ result.fplr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "ListAll", resp, "Failure sending request")
+ return
+ }
+
+ result.fplr, err = client.ListAllResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "ListAll", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client FirewallPoliciesClient) ListAllPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/firewallPolicies", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client FirewallPoliciesClient) ListAllSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client FirewallPoliciesClient) ListAllResponder(resp *http.Response) (result FirewallPolicyListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listAllNextResults retrieves the next set of results, if any.
+func (client FirewallPoliciesClient) listAllNextResults(ctx context.Context, lastResults FirewallPolicyListResult) (result FirewallPolicyListResult, err error) {
+ req, err := lastResults.firewallPolicyListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listAllNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListAllSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listAllNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListAllResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "listAllNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListAllComplete enumerates all values, automatically crossing page boundaries as required.
+func (client FirewallPoliciesClient) ListAllComplete(ctx context.Context) (result FirewallPolicyListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.ListAll")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListAll(ctx)
+ return
+}
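
The `List`/`ListAll` pair and their `*Complete` variants follow the usual AutoRest paging pattern: the `*Complete` methods return an iterator whose `NextWithContext` follows `nextLink` across page boundaries for you. A minimal usage sketch under assumed setup (the subscription ID and resource group are placeholders, and credentials are taken from the environment via `auth.NewAuthorizerFromEnvironment`):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := network.NewFirewallPoliciesClient("<subscription-id>") // placeholder
	client.Authorizer = authorizer

	ctx := context.Background()
	// ListComplete pages through every Firewall Policy in the resource group;
	// ListAllComplete(ctx) walks the whole subscription the same way.
	it, err := client.ListComplete(ctx, "<resource-group>")
	if err != nil {
		log.Fatal(err)
	}
	for ; it.NotDone(); err = it.NextWithContext(ctx) {
		if err != nil {
			log.Fatal(err)
		}
		if fp := it.Value(); fp.Name != nil {
			fmt.Println(*fp.Name)
		}
	}
}
```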
+
+// UpdateTags updates the tags of a Firewall Policy.
+// Parameters:
+// resourceGroupName - the resource group name of the Firewall Policy.
+// firewallPolicyName - the name of the Firewall Policy being updated.
+// firewallPolicyParameters - parameters supplied to Update Firewall Policy tags.
+func (client FirewallPoliciesClient) UpdateTags(ctx context.Context, resourceGroupName string, firewallPolicyName string, firewallPolicyParameters TagsObject) (result FirewallPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPoliciesClient.UpdateTags")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, firewallPolicyName, firewallPolicyParameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "UpdateTags", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateTagsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "UpdateTags", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateTagsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesClient", "UpdateTags", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdateTagsPreparer prepares the UpdateTags request.
+func (client FirewallPoliciesClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, firewallPolicyParameters TagsObject) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "firewallPolicyName": autorest.Encode("path", firewallPolicyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}", pathParameters),
+ autorest.WithJSON(firewallPolicyParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateTagsSender sends the UpdateTags request. The method will close the
+// http.Response Body if it receives an error.
+func (client FirewallPoliciesClient) UpdateTagsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateTagsResponder handles the response to the UpdateTags request. The method always
+// closes the http.Response Body.
+func (client FirewallPoliciesClient) UpdateTagsResponder(resp *http.Response) (result FirewallPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
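
`UpdateTags` issues the PATCH shown above and only touches the tag envelope of an existing policy. A hedged sketch, reusing a `FirewallPoliciesClient` configured as in the listing example above (resource names are placeholders; `TagsObject` is assumed to be this package's thin wrapper around a `map[string]*string`):

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
	"github.com/Azure/go-autorest/autorest/to"
)

// updateFirewallPolicyTags replaces the tag set on an existing Firewall Policy.
func updateFirewallPolicyTags(ctx context.Context, client network.FirewallPoliciesClient) error {
	tags := network.TagsObject{
		Tags: map[string]*string{
			"env":   to.StringPtr("dev"),
			"owner": to.StringPtr("network-team"),
		},
	}
	fp, err := client.UpdateTags(ctx, "<resource-group>", "<firewall-policy>", tags)
	if err != nil {
		return err
	}
	if fp.Name != nil {
		fmt.Println("updated tags on", *fp.Name)
	}
	return nil
}
```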
diff --git a/services/network/mgmt/2019-06-01/network/firewallpolicyrulegroups.go b/services/network/mgmt/2019-06-01/network/firewallpolicyrulegroups.go
new file mode 100644
index 000000000000..989c3d1635d0
--- /dev/null
+++ b/services/network/mgmt/2019-06-01/network/firewallpolicyrulegroups.go
@@ -0,0 +1,409 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// FirewallPolicyRuleGroupsClient is the network Client
+type FirewallPolicyRuleGroupsClient struct {
+ BaseClient
+}
+
+// NewFirewallPolicyRuleGroupsClient creates an instance of the FirewallPolicyRuleGroupsClient client.
+func NewFirewallPolicyRuleGroupsClient(subscriptionID string) FirewallPolicyRuleGroupsClient {
+ return NewFirewallPolicyRuleGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewFirewallPolicyRuleGroupsClientWithBaseURI creates an instance of the FirewallPolicyRuleGroupsClient client.
+func NewFirewallPolicyRuleGroupsClientWithBaseURI(baseURI string, subscriptionID string) FirewallPolicyRuleGroupsClient {
+ return FirewallPolicyRuleGroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates the specified FirewallPolicyRuleGroup.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// firewallPolicyName - the name of the Firewall Policy.
+// ruleGroupName - the name of the FirewallPolicyRuleGroup.
+// parameters - parameters supplied to the create or update FirewallPolicyRuleGroup operation.
+func (client FirewallPolicyRuleGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string, parameters FirewallPolicyRuleGroup) (result FirewallPolicyRuleGroupsCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.FirewallPolicyRuleGroupProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.FirewallPolicyRuleGroupProperties.Priority", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.FirewallPolicyRuleGroupProperties.Priority", Name: validation.InclusiveMaximum, Rule: int64(65000), Chain: nil},
+ {Target: "parameters.FirewallPolicyRuleGroupProperties.Priority", Name: validation.InclusiveMinimum, Rule: 100, Chain: nil},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("network.FirewallPolicyRuleGroupsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, firewallPolicyName, ruleGroupName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
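
`CreateOrUpdate` is a long-running PUT: the client first validates that the rule-group priority lies in [100, 65000], and the returned `FirewallPolicyRuleGroupsCreateOrUpdateFuture` is polled to completion before `Result` yields the final resource. A sketch under assumed setup (resource names are placeholders; the `FirewallPolicyRuleGroupProperties`/`Priority` field names are taken from the validation block above, and the rule collection is omitted for brevity):

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
	"github.com/Azure/go-autorest/autorest/to"
)

// createRuleGroup provisions a (rule-less) rule group and waits for the PUT to finish.
func createRuleGroup(ctx context.Context, client network.FirewallPolicyRuleGroupsClient) error {
	params := network.FirewallPolicyRuleGroup{
		FirewallPolicyRuleGroupProperties: &network.FirewallPolicyRuleGroupProperties{
			// Must be within [100, 65000] or the validation above rejects the call client-side.
			Priority: to.Int32Ptr(200),
		},
	}

	future, err := client.CreateOrUpdate(ctx, "<resource-group>", "<firewall-policy>", "<rule-group>", params)
	if err != nil {
		return err
	}
	// Block until the service reports a terminal state, then unmarshal the result.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	rg, err := future.Result(client)
	if err != nil {
		return err
	}
	if rg.Name != nil {
		fmt.Println("provisioned rule group", *rg.Name)
	}
	return nil
}
```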
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client FirewallPolicyRuleGroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string, parameters FirewallPolicyRuleGroup) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "firewallPolicyName": autorest.Encode("path", firewallPolicyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "ruleGroupName": autorest.Encode("path", ruleGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ parameters.Etag = nil
+ parameters.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups/{ruleGroupName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client FirewallPolicyRuleGroupsClient) CreateOrUpdateSender(req *http.Request) (future FirewallPolicyRuleGroupsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client FirewallPolicyRuleGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result FirewallPolicyRuleGroup, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes the specified FirewallPolicyRuleGroup.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// firewallPolicyName - the name of the Firewall Policy.
+// ruleGroupName - the name of the FirewallPolicyRuleGroup.
+func (client FirewallPolicyRuleGroupsClient) Delete(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string) (result FirewallPolicyRuleGroupsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, firewallPolicyName, ruleGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client FirewallPolicyRuleGroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "firewallPolicyName": autorest.Encode("path", firewallPolicyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "ruleGroupName": autorest.Encode("path", ruleGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups/{ruleGroupName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client FirewallPolicyRuleGroupsClient) DeleteSender(req *http.Request) (future FirewallPolicyRuleGroupsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client FirewallPolicyRuleGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the specified FirewallPolicyRuleGroup.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// firewallPolicyName - the name of the Firewall Policy.
+// ruleGroupName - the name of the FirewallPolicyRuleGroup.
+func (client FirewallPolicyRuleGroupsClient) Get(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string) (result FirewallPolicyRuleGroup, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, firewallPolicyName, ruleGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client FirewallPolicyRuleGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "firewallPolicyName": autorest.Encode("path", firewallPolicyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "ruleGroupName": autorest.Encode("path", ruleGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups/{ruleGroupName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client FirewallPolicyRuleGroupsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client FirewallPolicyRuleGroupsClient) GetResponder(resp *http.Response) (result FirewallPolicyRuleGroup, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all FirewallPolicyRuleGroups in a FirewallPolicy resource.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// firewallPolicyName - the name of the Firewall Policy.
+func (client FirewallPolicyRuleGroupsClient) List(ctx context.Context, resourceGroupName string, firewallPolicyName string) (result FirewallPolicyRuleGroupListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupsClient.List")
+ defer func() {
+ sc := -1
+ if result.fprglr.Response.Response != nil {
+ sc = result.fprglr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, firewallPolicyName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.fprglr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.fprglr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client FirewallPolicyRuleGroupsClient) ListPreparer(ctx context.Context, resourceGroupName string, firewallPolicyName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "firewallPolicyName": autorest.Encode("path", firewallPolicyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client FirewallPolicyRuleGroupsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client FirewallPolicyRuleGroupsClient) ListResponder(resp *http.Response) (result FirewallPolicyRuleGroupListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client FirewallPolicyRuleGroupsClient) listNextResults(ctx context.Context, lastResults FirewallPolicyRuleGroupListResult) (result FirewallPolicyRuleGroupListResult, err error) {
+ req, err := lastResults.firewallPolicyRuleGroupListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client FirewallPolicyRuleGroupsClient) ListComplete(ctx context.Context, resourceGroupName string, firewallPolicyName string) (result FirewallPolicyRuleGroupListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, firewallPolicyName)
+ return
+}
diff --git a/services/network/mgmt/2019-06-01/network/models.go b/services/network/mgmt/2019-06-01/network/models.go
index 3f4ca3c86802..3d20830d548b 100644
--- a/services/network/mgmt/2019-06-01/network/models.go
+++ b/services/network/mgmt/2019-06-01/network/models.go
@@ -869,6 +869,74 @@ func PossibleExpressRoutePortsEncapsulationValues() []ExpressRoutePortsEncapsula
return []ExpressRoutePortsEncapsulation{Dot1Q, QinQ}
}
+// FirewallPolicyFilterRuleActionType enumerates the values for firewall policy filter rule action type.
+type FirewallPolicyFilterRuleActionType string
+
+const (
+ // FirewallPolicyFilterRuleActionTypeAlert ...
+	FirewallPolicyFilterRuleActionTypeAlert FirewallPolicyFilterRuleActionType = "Alert"
+ // FirewallPolicyFilterRuleActionTypeAllow ...
+ FirewallPolicyFilterRuleActionTypeAllow FirewallPolicyFilterRuleActionType = "Allow"
+ // FirewallPolicyFilterRuleActionTypeDeny ...
+ FirewallPolicyFilterRuleActionTypeDeny FirewallPolicyFilterRuleActionType = "Deny"
+)
+
+// PossibleFirewallPolicyFilterRuleActionTypeValues returns an array of possible values for the FirewallPolicyFilterRuleActionType const type.
+func PossibleFirewallPolicyFilterRuleActionTypeValues() []FirewallPolicyFilterRuleActionType {
+ return []FirewallPolicyFilterRuleActionType{FirewallPolicyFilterRuleActionTypeAlert, FirewallPolicyFilterRuleActionTypeAllow, FirewallPolicyFilterRuleActionTypeDeny}
+}
+
+// FirewallPolicyNatRuleActionType enumerates the values for firewall policy nat rule action type.
+type FirewallPolicyNatRuleActionType string
+
+const (
+ // DNAT ...
+ DNAT FirewallPolicyNatRuleActionType = "DNAT"
+ // SNAT ...
+ SNAT FirewallPolicyNatRuleActionType = "SNAT"
+)
+
+// PossibleFirewallPolicyNatRuleActionTypeValues returns an array of possible values for the FirewallPolicyNatRuleActionType const type.
+func PossibleFirewallPolicyNatRuleActionTypeValues() []FirewallPolicyNatRuleActionType {
+ return []FirewallPolicyNatRuleActionType{DNAT, SNAT}
+}
+
+// FirewallPolicyRuleConditionApplicationProtocolType enumerates the values for firewall policy rule condition
+// application protocol type.
+type FirewallPolicyRuleConditionApplicationProtocolType string
+
+const (
+ // FirewallPolicyRuleConditionApplicationProtocolTypeHTTP ...
+ FirewallPolicyRuleConditionApplicationProtocolTypeHTTP FirewallPolicyRuleConditionApplicationProtocolType = "Http"
+ // FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS ...
+ FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS FirewallPolicyRuleConditionApplicationProtocolType = "Https"
+)
+
+// PossibleFirewallPolicyRuleConditionApplicationProtocolTypeValues returns an array of possible values for the FirewallPolicyRuleConditionApplicationProtocolType const type.
+func PossibleFirewallPolicyRuleConditionApplicationProtocolTypeValues() []FirewallPolicyRuleConditionApplicationProtocolType {
+ return []FirewallPolicyRuleConditionApplicationProtocolType{FirewallPolicyRuleConditionApplicationProtocolTypeHTTP, FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS}
+}
+
+// FirewallPolicyRuleConditionNetworkProtocol enumerates the values for firewall policy rule condition network
+// protocol.
+type FirewallPolicyRuleConditionNetworkProtocol string
+
+const (
+ // FirewallPolicyRuleConditionNetworkProtocolAny ...
+ FirewallPolicyRuleConditionNetworkProtocolAny FirewallPolicyRuleConditionNetworkProtocol = "Any"
+ // FirewallPolicyRuleConditionNetworkProtocolICMP ...
+ FirewallPolicyRuleConditionNetworkProtocolICMP FirewallPolicyRuleConditionNetworkProtocol = "ICMP"
+ // FirewallPolicyRuleConditionNetworkProtocolTCP ...
+ FirewallPolicyRuleConditionNetworkProtocolTCP FirewallPolicyRuleConditionNetworkProtocol = "TCP"
+ // FirewallPolicyRuleConditionNetworkProtocolUDP ...
+ FirewallPolicyRuleConditionNetworkProtocolUDP FirewallPolicyRuleConditionNetworkProtocol = "UDP"
+)
+
+// PossibleFirewallPolicyRuleConditionNetworkProtocolValues returns an array of possible values for the FirewallPolicyRuleConditionNetworkProtocol const type.
+func PossibleFirewallPolicyRuleConditionNetworkProtocolValues() []FirewallPolicyRuleConditionNetworkProtocol {
+ return []FirewallPolicyRuleConditionNetworkProtocol{FirewallPolicyRuleConditionNetworkProtocolAny, FirewallPolicyRuleConditionNetworkProtocolICMP, FirewallPolicyRuleConditionNetworkProtocolTCP, FirewallPolicyRuleConditionNetworkProtocolUDP}
+}
+
// FlowLogFormatType enumerates the values for flow log format type.
type FlowLogFormatType string
@@ -1452,6 +1520,40 @@ func PossibleRouteNextHopTypeValues() []RouteNextHopType {
return []RouteNextHopType{RouteNextHopTypeInternet, RouteNextHopTypeNone, RouteNextHopTypeVirtualAppliance, RouteNextHopTypeVirtualNetworkGateway, RouteNextHopTypeVnetLocal}
}
+// RuleConditionType enumerates the values for rule condition type.
+type RuleConditionType string
+
+const (
+ // RuleConditionTypeApplicationRuleCondition ...
+ RuleConditionTypeApplicationRuleCondition RuleConditionType = "ApplicationRuleCondition"
+ // RuleConditionTypeFirewallPolicyRuleCondition ...
+ RuleConditionTypeFirewallPolicyRuleCondition RuleConditionType = "FirewallPolicyRuleCondition"
+ // RuleConditionTypeNetworkRuleCondition ...
+ RuleConditionTypeNetworkRuleCondition RuleConditionType = "NetworkRuleCondition"
+)
+
+// PossibleRuleConditionTypeValues returns an array of possible values for the RuleConditionType const type.
+func PossibleRuleConditionTypeValues() []RuleConditionType {
+ return []RuleConditionType{RuleConditionTypeApplicationRuleCondition, RuleConditionTypeFirewallPolicyRuleCondition, RuleConditionTypeNetworkRuleCondition}
+}
+
+// RuleType enumerates the values for rule type.
+type RuleType string
+
+const (
+ // RuleTypeFirewallPolicyFilterRule ...
+ RuleTypeFirewallPolicyFilterRule RuleType = "FirewallPolicyFilterRule"
+ // RuleTypeFirewallPolicyNatRule ...
+ RuleTypeFirewallPolicyNatRule RuleType = "FirewallPolicyNatRule"
+ // RuleTypeFirewallPolicyRule ...
+ RuleTypeFirewallPolicyRule RuleType = "FirewallPolicyRule"
+)
+
+// PossibleRuleTypeValues returns an array of possible values for the RuleType const type.
+func PossibleRuleTypeValues() []RuleType {
+ return []RuleType{RuleTypeFirewallPolicyFilterRule, RuleTypeFirewallPolicyNatRule, RuleTypeFirewallPolicyRule}
+}
+
// SecurityRuleAccess enumerates the values for security rule access.
type SecurityRuleAccess string
@@ -3595,10 +3697,10 @@ type ApplicationGatewayOnDemandProbe struct {
PickHostNameFromBackendHTTPSettings *bool `json:"pickHostNameFromBackendHttpSettings,omitempty"`
// Match - Criterion for classifying a healthy probe response.
Match *ApplicationGatewayProbeHealthResponseMatch `json:"match,omitempty"`
- // BackendPoolName - Name of backend pool of application gateway to which probe request will be sent.
- BackendPoolName *string `json:"backendPoolName,omitempty"`
- // BackendHTTPSettingName - Name of backend http setting of application gateway to be used for test probe.
- BackendHTTPSettingName *string `json:"backendHttpSettingName,omitempty"`
+ // BackendAddressPool - Reference of backend pool of application gateway to which probe request will be sent.
+ BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"`
+ // BackendHTTPSettings - Reference of backend http setting of application gateway to be used for test probe.
+ BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"`
}
// ApplicationGatewayPathRule path rule of URL path map of an application gateway.
@@ -4885,6 +4987,77 @@ type ApplicationGatewayWebApplicationFirewallConfiguration struct {
Exclusions *[]ApplicationGatewayFirewallExclusion `json:"exclusions,omitempty"`
}
+// ApplicationRuleCondition rule condition of type application.
+type ApplicationRuleCondition struct {
+ // SourceAddresses - List of source IP addresses for this rule.
+ SourceAddresses *[]string `json:"sourceAddresses,omitempty"`
+ // DestinationAddresses - List of destination IP addresses or Service Tags.
+ DestinationAddresses *[]string `json:"destinationAddresses,omitempty"`
+ // Protocols - Array of Application Protocols.
+ Protocols *[]FirewallPolicyRuleConditionApplicationProtocol `json:"protocols,omitempty"`
+ // TargetFqdns - List of FQDNs for this rule condition.
+ TargetFqdns *[]string `json:"targetFqdns,omitempty"`
+ // FqdnTags - List of FQDN Tags for this rule condition.
+ FqdnTags *[]string `json:"fqdnTags,omitempty"`
+ // Name - Name of the rule condition.
+ Name *string `json:"name,omitempty"`
+ // Description - Description of the rule condition.
+ Description *string `json:"description,omitempty"`
+ // RuleConditionType - Possible values include: 'RuleConditionTypeFirewallPolicyRuleCondition', 'RuleConditionTypeApplicationRuleCondition', 'RuleConditionTypeNetworkRuleCondition'
+ RuleConditionType RuleConditionType `json:"ruleConditionType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ApplicationRuleCondition.
+func (arc ApplicationRuleCondition) MarshalJSON() ([]byte, error) {
+ arc.RuleConditionType = RuleConditionTypeApplicationRuleCondition
+ objectMap := make(map[string]interface{})
+ if arc.SourceAddresses != nil {
+ objectMap["sourceAddresses"] = arc.SourceAddresses
+ }
+ if arc.DestinationAddresses != nil {
+ objectMap["destinationAddresses"] = arc.DestinationAddresses
+ }
+ if arc.Protocols != nil {
+ objectMap["protocols"] = arc.Protocols
+ }
+ if arc.TargetFqdns != nil {
+ objectMap["targetFqdns"] = arc.TargetFqdns
+ }
+ if arc.FqdnTags != nil {
+ objectMap["fqdnTags"] = arc.FqdnTags
+ }
+ if arc.Name != nil {
+ objectMap["name"] = arc.Name
+ }
+ if arc.Description != nil {
+ objectMap["description"] = arc.Description
+ }
+ if arc.RuleConditionType != "" {
+ objectMap["ruleConditionType"] = arc.RuleConditionType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsApplicationRuleCondition is the BasicFirewallPolicyRuleCondition implementation for ApplicationRuleCondition.
+func (arc ApplicationRuleCondition) AsApplicationRuleCondition() (*ApplicationRuleCondition, bool) {
+ return &arc, true
+}
+
+// AsRuleCondition is the BasicFirewallPolicyRuleCondition implementation for ApplicationRuleCondition.
+func (arc ApplicationRuleCondition) AsRuleCondition() (*RuleCondition, bool) {
+ return nil, false
+}
+
+// AsFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for ApplicationRuleCondition.
+func (arc ApplicationRuleCondition) AsFirewallPolicyRuleCondition() (*FirewallPolicyRuleCondition, bool) {
+ return nil, false
+}
+
+// AsBasicFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for ApplicationRuleCondition.
+func (arc ApplicationRuleCondition) AsBasicFirewallPolicyRuleCondition() (BasicFirewallPolicyRuleCondition, bool) {
+ return &arc, true
+}
+
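Rule conditions are polymorphic: the custom marshaler above stamps the `ruleConditionType` discriminator (`ApplicationRuleCondition`) automatically, so callers only populate payload fields. A small serialization sketch (addresses and FQDNs are placeholders; `Protocols` and `FqdnTags` would be filled in the same pointer-to-slice style):

```go
package example

import (
	"encoding/json"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
	"github.com/Azure/go-autorest/autorest/to"
)

// marshalCondition shows the discriminator being injected by MarshalJSON.
func marshalCondition() ([]byte, error) {
	cond := network.ApplicationRuleCondition{
		Name:            to.StringPtr("allow-web"),
		SourceAddresses: &[]string{"10.0.0.0/24"},
		TargetFqdns:     &[]string{"www.example.com"},
	}
	// The emitted JSON contains "ruleConditionType": "ApplicationRuleCondition"
	// even though the field was never set explicitly.
	return json.Marshal(cond)
}
```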
// ApplicationSecurityGroup an application security group in a resource group.
type ApplicationSecurityGroup struct {
autorest.Response `json:"-"`
@@ -6923,6 +7096,18 @@ type AzureFirewallPropertiesFormat struct {
ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
// ThreatIntelMode - The operation mode for Threat Intelligence. Possible values include: 'AzureFirewallThreatIntelModeAlert', 'AzureFirewallThreatIntelModeDeny', 'AzureFirewallThreatIntelModeOff'
ThreatIntelMode AzureFirewallThreatIntelMode `json:"threatIntelMode,omitempty"`
+ // VirtualHub - The virtualHub to which the firewall belongs.
+ VirtualHub *SubResource `json:"virtualHub,omitempty"`
+ // FirewallPolicy - The firewallPolicy associated with this azure firewall.
+ FirewallPolicy *SubResource `json:"firewallPolicy,omitempty"`
+ // HubIPAddresses - READ-ONLY; IP addresses associated with AzureFirewall.
+ HubIPAddresses *HubIPAddresses `json:"hubIpAddresses,omitempty"`
+}
+
+// AzureFirewallPublicIPAddress a public IP address associated with an Azure Firewall.
+type AzureFirewallPublicIPAddress struct {
+ // Address - Public IP Address value.
+ Address *string `json:"address,omitempty"`
}
// AzureFirewallRCAction properties of the AzureFirewallRCAction.
@@ -13226,18 +13411,1058 @@ type ExpressRouteServiceProviderListResult struct {
NextLink *string `json:"nextLink,omitempty"`
}
-// ExpressRouteServiceProviderListResultIterator provides access to a complete listing of
-// ExpressRouteServiceProvider values.
-type ExpressRouteServiceProviderListResultIterator struct {
+// ExpressRouteServiceProviderListResultIterator provides access to a complete listing of
+// ExpressRouteServiceProvider values.
+type ExpressRouteServiceProviderListResultIterator struct {
+ i int
+ page ExpressRouteServiceProviderListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ExpressRouteServiceProviderListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProviderListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ExpressRouteServiceProviderListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ExpressRouteServiceProviderListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ExpressRouteServiceProviderListResultIterator) Response() ExpressRouteServiceProviderListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ExpressRouteServiceProviderListResultIterator) Value() ExpressRouteServiceProvider {
+ if !iter.page.NotDone() {
+ return ExpressRouteServiceProvider{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ExpressRouteServiceProviderListResultIterator type.
+func NewExpressRouteServiceProviderListResultIterator(page ExpressRouteServiceProviderListResultPage) ExpressRouteServiceProviderListResultIterator {
+ return ExpressRouteServiceProviderListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (ersplr ExpressRouteServiceProviderListResult) IsEmpty() bool {
+ return ersplr.Value == nil || len(*ersplr.Value) == 0
+}
+
+// expressRouteServiceProviderListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (ersplr ExpressRouteServiceProviderListResult) expressRouteServiceProviderListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if ersplr.NextLink == nil || len(to.String(ersplr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(ersplr.NextLink)))
+}
+
+// ExpressRouteServiceProviderListResultPage contains a page of ExpressRouteServiceProvider values.
+type ExpressRouteServiceProviderListResultPage struct {
+ fn func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error)
+ ersplr ExpressRouteServiceProviderListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ExpressRouteServiceProviderListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProviderListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.ersplr)
+ if err != nil {
+ return err
+ }
+ page.ersplr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ExpressRouteServiceProviderListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ExpressRouteServiceProviderListResultPage) NotDone() bool {
+ return !page.ersplr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ExpressRouteServiceProviderListResultPage) Response() ExpressRouteServiceProviderListResult {
+ return page.ersplr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ExpressRouteServiceProviderListResultPage) Values() []ExpressRouteServiceProvider {
+ if page.ersplr.IsEmpty() {
+ return nil
+ }
+ return *page.ersplr.Value
+}
+
+// Creates a new instance of the ExpressRouteServiceProviderListResultPage type.
+func NewExpressRouteServiceProviderListResultPage(getNextPage func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error)) ExpressRouteServiceProviderListResultPage {
+ return ExpressRouteServiceProviderListResultPage{fn: getNextPage}
+}
+
+// ExpressRouteServiceProviderPropertiesFormat properties of ExpressRouteServiceProvider.
+type ExpressRouteServiceProviderPropertiesFormat struct {
+ // PeeringLocations - Get a list of peering locations.
+ PeeringLocations *[]string `json:"peeringLocations,omitempty"`
+ // BandwidthsOffered - Gets bandwidths offered.
+ BandwidthsOffered *[]ExpressRouteServiceProviderBandwidthsOffered `json:"bandwidthsOffered,omitempty"`
+ // ProvisioningState - Gets the provisioning state of the resource.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// FirewallPoliciesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type FirewallPoliciesCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *FirewallPoliciesCreateOrUpdateFuture) Result(client FirewallPoliciesClient) (fp FirewallPolicy, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("network.FirewallPoliciesCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if fp.Response.Response, err = future.GetResult(sender); err == nil && fp.Response.Response.StatusCode != http.StatusNoContent {
+ fp, err = client.CreateOrUpdateResponder(fp.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesCreateOrUpdateFuture", "Result", fp.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// FirewallPoliciesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type FirewallPoliciesDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *FirewallPoliciesDeleteFuture) Result(client FirewallPoliciesClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPoliciesDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("network.FirewallPoliciesDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// FirewallPolicy a Firewall Policy resource.
+type FirewallPolicy struct {
+ autorest.Response `json:"-"`
+ // FirewallPolicyPropertiesFormat - Properties of the firewall policy.
+ *FirewallPolicyPropertiesFormat `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Gets a unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for FirewallPolicy.
+func (fp FirewallPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fp.FirewallPolicyPropertiesFormat != nil {
+ objectMap["properties"] = fp.FirewallPolicyPropertiesFormat
+ }
+ if fp.ID != nil {
+ objectMap["id"] = fp.ID
+ }
+ if fp.Location != nil {
+ objectMap["location"] = fp.Location
+ }
+ if fp.Tags != nil {
+ objectMap["tags"] = fp.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FirewallPolicy struct.
+func (fp *FirewallPolicy) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var firewallPolicyPropertiesFormat FirewallPolicyPropertiesFormat
+ err = json.Unmarshal(*v, &firewallPolicyPropertiesFormat)
+ if err != nil {
+ return err
+ }
+ fp.FirewallPolicyPropertiesFormat = &firewallPolicyPropertiesFormat
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ fp.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fp.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ fp.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ fp.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
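
Because `Etag`, `Name` and `Type` are READ-ONLY, the custom `MarshalJSON` above deliberately omits them and places the embedded `FirewallPolicyPropertiesFormat` under the `properties` envelope, so a create/update payload only needs the writable fields. A minimal sketch (location and tag values are placeholders):

```go
package example

import (
	"encoding/json"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
	"github.com/Azure/go-autorest/autorest/to"
)

// newPolicyPayload builds the body a CreateOrUpdate call would send.
func newPolicyPayload() ([]byte, error) {
	fp := network.FirewallPolicy{
		Location: to.StringPtr("westus"),
		Tags:     map[string]*string{"env": to.StringPtr("dev")},
	}
	// Etag/Name/Type are not emitted; the property bag would appear under
	// "properties" if FirewallPolicyPropertiesFormat were populated.
	return json.Marshal(fp)
}
```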
+
+// FirewallPolicyFilterRule a Firewall Policy filter rule.
+type FirewallPolicyFilterRule struct {
+ // Action - The action type of a Filter rule
+ Action *FirewallPolicyFilterRuleAction `json:"action,omitempty"`
+ // RuleConditions - Collection of rule conditions used by a rule.
+ RuleConditions *[]BasicFirewallPolicyRuleCondition `json:"ruleConditions,omitempty"`
+ // Name - Name of the Rule
+ Name *string `json:"name,omitempty"`
+ // Priority - Priority of the Firewall Policy Rule resource.
+ Priority *int32 `json:"priority,omitempty"`
+ // RuleType - Possible values include: 'RuleTypeFirewallPolicyRule', 'RuleTypeFirewallPolicyNatRule', 'RuleTypeFirewallPolicyFilterRule'
+ RuleType RuleType `json:"ruleType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FirewallPolicyFilterRule.
+func (fpfr FirewallPolicyFilterRule) MarshalJSON() ([]byte, error) {
+ fpfr.RuleType = RuleTypeFirewallPolicyFilterRule
+ objectMap := make(map[string]interface{})
+ if fpfr.Action != nil {
+ objectMap["action"] = fpfr.Action
+ }
+ if fpfr.RuleConditions != nil {
+ objectMap["ruleConditions"] = fpfr.RuleConditions
+ }
+ if fpfr.Name != nil {
+ objectMap["name"] = fpfr.Name
+ }
+ if fpfr.Priority != nil {
+ objectMap["priority"] = fpfr.Priority
+ }
+ if fpfr.RuleType != "" {
+ objectMap["ruleType"] = fpfr.RuleType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsFirewallPolicyNatRule is the BasicFirewallPolicyRule implementation for FirewallPolicyFilterRule.
+func (fpfr FirewallPolicyFilterRule) AsFirewallPolicyNatRule() (*FirewallPolicyNatRule, bool) {
+ return nil, false
+}
+
+// AsFirewallPolicyFilterRule is the BasicFirewallPolicyRule implementation for FirewallPolicyFilterRule.
+func (fpfr FirewallPolicyFilterRule) AsFirewallPolicyFilterRule() (*FirewallPolicyFilterRule, bool) {
+ return &fpfr, true
+}
+
+// AsFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyFilterRule.
+func (fpfr FirewallPolicyFilterRule) AsFirewallPolicyRule() (*FirewallPolicyRule, bool) {
+ return nil, false
+}
+
+// AsBasicFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyFilterRule.
+func (fpfr FirewallPolicyFilterRule) AsBasicFirewallPolicyRule() (BasicFirewallPolicyRule, bool) {
+ return &fpfr, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for FirewallPolicyFilterRule struct.
+func (fpfr *FirewallPolicyFilterRule) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "action":
+ if v != nil {
+ var action FirewallPolicyFilterRuleAction
+ err = json.Unmarshal(*v, &action)
+ if err != nil {
+ return err
+ }
+ fpfr.Action = &action
+ }
+ case "ruleConditions":
+ if v != nil {
+ ruleConditions, err := unmarshalBasicFirewallPolicyRuleConditionArray(*v)
+ if err != nil {
+ return err
+ }
+ fpfr.RuleConditions = &ruleConditions
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fpfr.Name = &name
+ }
+ case "priority":
+ if v != nil {
+ var priority int32
+ err = json.Unmarshal(*v, &priority)
+ if err != nil {
+ return err
+ }
+ fpfr.Priority = &priority
+ }
+ case "ruleType":
+ if v != nil {
+ var ruleType RuleType
+ err = json.Unmarshal(*v, &ruleType)
+ if err != nil {
+ return err
+ }
+ fpfr.RuleType = ruleType
+ }
+ }
+ }
+
+ return nil
+}
+
+// FirewallPolicyFilterRuleAction properties of the FirewallPolicyFilterRuleAction.
+type FirewallPolicyFilterRuleAction struct {
+ // Type - The type of action. Possible values include: 'FirewallPolicyFilterRuleActionTypeAllow', 'FirewallPolicyFilterRuleActionTypeDeny', 'FirewallPolicyFilterRuleActionTypeAlert'
+ Type FirewallPolicyFilterRuleActionType `json:"type,omitempty"`
+}
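
Putting the pieces together: a filter rule carries an action plus a slice of `BasicFirewallPolicyRuleCondition` values, and the paired custom (un)marshalers keep the `ruleType`/`ruleConditionType` discriminators consistent in both directions (`unmarshalBasicFirewallPolicyRuleConditionArray` restores the concrete condition types on the way back in). A construction sketch with placeholder names:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
	"github.com/Azure/go-autorest/autorest/to"
)

// buildFilterRule assembles an Allow filter rule with one application condition.
func buildFilterRule() network.FirewallPolicyFilterRule {
	cond := network.ApplicationRuleCondition{
		Name:            to.StringPtr("allow-web"),
		SourceAddresses: &[]string{"10.0.0.0/24"},
		TargetFqdns:     &[]string{"www.example.com"},
	}
	return network.FirewallPolicyFilterRule{
		Name:     to.StringPtr("web-filter-rule"),
		Priority: to.Int32Ptr(200),
		Action: &network.FirewallPolicyFilterRuleAction{
			Type: network.FirewallPolicyFilterRuleActionTypeAllow,
		},
		// Any BasicFirewallPolicyRuleCondition implementation can go in the slice;
		// MarshalJSON stamps ruleType, and each condition stamps its own discriminator.
		RuleConditions: &[]network.BasicFirewallPolicyRuleCondition{cond},
	}
}
```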
+
+// FirewallPolicyListResult response for ListFirewallPolicies API service call.
+type FirewallPolicyListResult struct {
+ autorest.Response `json:"-"`
+ // Value - List of Firewall Policies in a resource group.
+ Value *[]FirewallPolicy `json:"value,omitempty"`
+ // NextLink - URL to get the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// FirewallPolicyListResultIterator provides access to a complete listing of FirewallPolicy values.
+type FirewallPolicyListResultIterator struct {
+ i int
+ page FirewallPolicyListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *FirewallPolicyListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *FirewallPolicyListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter FirewallPolicyListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter FirewallPolicyListResultIterator) Response() FirewallPolicyListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter FirewallPolicyListResultIterator) Value() FirewallPolicy {
+ if !iter.page.NotDone() {
+ return FirewallPolicy{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the FirewallPolicyListResultIterator type.
+func NewFirewallPolicyListResultIterator(page FirewallPolicyListResultPage) FirewallPolicyListResultIterator {
+ return FirewallPolicyListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (fplr FirewallPolicyListResult) IsEmpty() bool {
+ return fplr.Value == nil || len(*fplr.Value) == 0
+}
+
+// firewallPolicyListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (fplr FirewallPolicyListResult) firewallPolicyListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if fplr.NextLink == nil || len(to.String(fplr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(fplr.NextLink)))
+}
+
+// FirewallPolicyListResultPage contains a page of FirewallPolicy values.
+type FirewallPolicyListResultPage struct {
+ fn func(context.Context, FirewallPolicyListResult) (FirewallPolicyListResult, error)
+ fplr FirewallPolicyListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *FirewallPolicyListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.fplr)
+ if err != nil {
+ return err
+ }
+ page.fplr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *FirewallPolicyListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page FirewallPolicyListResultPage) NotDone() bool {
+ return !page.fplr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page FirewallPolicyListResultPage) Response() FirewallPolicyListResult {
+ return page.fplr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page FirewallPolicyListResultPage) Values() []FirewallPolicy {
+ if page.fplr.IsEmpty() {
+ return nil
+ }
+ return *page.fplr.Value
+}
+
+// Creates a new instance of the FirewallPolicyListResultPage type.
+func NewFirewallPolicyListResultPage(getNextPage func(context.Context, FirewallPolicyListResult) (FirewallPolicyListResult, error)) FirewallPolicyListResultPage {
+ return FirewallPolicyListResultPage{fn: getNextPage}
+}
+
+// FirewallPolicyNatRule a Firewall Policy NAT rule.
+type FirewallPolicyNatRule struct {
+ // Action - The action type of a Nat rule, SNAT or DNAT
+ Action *FirewallPolicyNatRuleAction `json:"action,omitempty"`
+ // TranslatedAddress - The translated address for this NAT rule.
+ TranslatedAddress *string `json:"translatedAddress,omitempty"`
+ // TranslatedPort - The translated port for this NAT rule.
+ TranslatedPort *string `json:"translatedPort,omitempty"`
+ // RuleCondition - The match conditions for incoming traffic
+ RuleCondition BasicFirewallPolicyRuleCondition `json:"ruleCondition,omitempty"`
+ // Name - Name of the Rule
+ Name *string `json:"name,omitempty"`
+ // Priority - Priority of the Firewall Policy Rule resource.
+ Priority *int32 `json:"priority,omitempty"`
+ // RuleType - Possible values include: 'RuleTypeFirewallPolicyRule', 'RuleTypeFirewallPolicyNatRule', 'RuleTypeFirewallPolicyFilterRule'
+ RuleType RuleType `json:"ruleType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FirewallPolicyNatRule.
+func (fpnr FirewallPolicyNatRule) MarshalJSON() ([]byte, error) {
+ fpnr.RuleType = RuleTypeFirewallPolicyNatRule
+ objectMap := make(map[string]interface{})
+ if fpnr.Action != nil {
+ objectMap["action"] = fpnr.Action
+ }
+ if fpnr.TranslatedAddress != nil {
+ objectMap["translatedAddress"] = fpnr.TranslatedAddress
+ }
+ if fpnr.TranslatedPort != nil {
+ objectMap["translatedPort"] = fpnr.TranslatedPort
+ }
+ objectMap["ruleCondition"] = fpnr.RuleCondition
+ if fpnr.Name != nil {
+ objectMap["name"] = fpnr.Name
+ }
+ if fpnr.Priority != nil {
+ objectMap["priority"] = fpnr.Priority
+ }
+ if fpnr.RuleType != "" {
+ objectMap["ruleType"] = fpnr.RuleType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsFirewallPolicyNatRule is the BasicFirewallPolicyRule implementation for FirewallPolicyNatRule.
+func (fpnr FirewallPolicyNatRule) AsFirewallPolicyNatRule() (*FirewallPolicyNatRule, bool) {
+ return &fpnr, true
+}
+
+// AsFirewallPolicyFilterRule is the BasicFirewallPolicyRule implementation for FirewallPolicyNatRule.
+func (fpnr FirewallPolicyNatRule) AsFirewallPolicyFilterRule() (*FirewallPolicyFilterRule, bool) {
+ return nil, false
+}
+
+// AsFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyNatRule.
+func (fpnr FirewallPolicyNatRule) AsFirewallPolicyRule() (*FirewallPolicyRule, bool) {
+ return nil, false
+}
+
+// AsBasicFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyNatRule.
+func (fpnr FirewallPolicyNatRule) AsBasicFirewallPolicyRule() (BasicFirewallPolicyRule, bool) {
+ return &fpnr, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for FirewallPolicyNatRule struct.
+func (fpnr *FirewallPolicyNatRule) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "action":
+ if v != nil {
+ var action FirewallPolicyNatRuleAction
+ err = json.Unmarshal(*v, &action)
+ if err != nil {
+ return err
+ }
+ fpnr.Action = &action
+ }
+ case "translatedAddress":
+ if v != nil {
+ var translatedAddress string
+ err = json.Unmarshal(*v, &translatedAddress)
+ if err != nil {
+ return err
+ }
+ fpnr.TranslatedAddress = &translatedAddress
+ }
+ case "translatedPort":
+ if v != nil {
+ var translatedPort string
+ err = json.Unmarshal(*v, &translatedPort)
+ if err != nil {
+ return err
+ }
+ fpnr.TranslatedPort = &translatedPort
+ }
+ case "ruleCondition":
+ if v != nil {
+ ruleCondition, err := unmarshalBasicFirewallPolicyRuleCondition(*v)
+ if err != nil {
+ return err
+ }
+ fpnr.RuleCondition = ruleCondition
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fpnr.Name = &name
+ }
+ case "priority":
+ if v != nil {
+ var priority int32
+ err = json.Unmarshal(*v, &priority)
+ if err != nil {
+ return err
+ }
+ fpnr.Priority = &priority
+ }
+ case "ruleType":
+ if v != nil {
+ var ruleType RuleType
+ err = json.Unmarshal(*v, &ruleType)
+ if err != nil {
+ return err
+ }
+ fpnr.RuleType = ruleType
+ }
+ }
+ }
+
+ return nil
+}
+
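// Illustrative usage sketch (not part of the generated code): constructing a FirewallPolicyNatRule
// and marshaling it. The custom MarshalJSON above injects the RuleTypeFirewallPolicyNatRule
// discriminator, and RuleCondition accepts any BasicFirewallPolicyRuleCondition implementation
// (here the network RuleCondition defined later in this file). Names and addresses are
// illustrative assumptions; the autorest/to pointer helpers are already imported by this package.
func exampleMarshalNatRule() ([]byte, error) {
	natRule := FirewallPolicyNatRule{
		Name:              to.StringPtr("dnat-web"),
		Priority:          to.Int32Ptr(100),
		Action:            &FirewallPolicyNatRuleAction{Type: DNAT},
		TranslatedAddress: to.StringPtr("10.0.0.4"),
		TranslatedPort:    to.StringPtr("8080"),
		RuleCondition: RuleCondition{
			Name:                 to.StringPtr("inbound-https"),
			SourceAddresses:      &[]string{"*"},
			DestinationAddresses: &[]string{"40.0.0.1"},
			DestinationPorts:     &[]string{"443"},
		},
	}
	// The ruleType discriminator is set by MarshalJSON, so callers never write it by hand.
	return json.Marshal(natRule)
}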
+// FirewallPolicyNatRuleAction properties of the FirewallPolicyNatRuleAction.
+type FirewallPolicyNatRuleAction struct {
+ // Type - The type of action. Possible values include: 'DNAT', 'SNAT'
+ Type FirewallPolicyNatRuleActionType `json:"type,omitempty"`
+}
+
+// FirewallPolicyPropertiesFormat firewall Policy definition.
+type FirewallPolicyPropertiesFormat struct {
+ // RuleGroups - READ-ONLY; List of references to FirewallPolicyRuleGroups.
+ RuleGroups *[]SubResource `json:"ruleGroups,omitempty"`
+ // ProvisioningState - The provisioning state of the resource. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // BasePolicy - The parent firewall policy from which rules are inherited.
+ BasePolicy *SubResource `json:"basePolicy,omitempty"`
+ // Firewalls - READ-ONLY; List of references to Azure Firewalls that this Firewall Policy is associated with.
+ Firewalls *[]SubResource `json:"firewalls,omitempty"`
+ // ChildPolicies - READ-ONLY; List of references to Child Firewall Policies.
+ ChildPolicies *[]SubResource `json:"childPolicies,omitempty"`
+ // ThreatIntelMode - The operation mode for Threat Intelligence. Possible values include: 'AzureFirewallThreatIntelModeAlert', 'AzureFirewallThreatIntelModeDeny', 'AzureFirewallThreatIntelModeOff'
+ ThreatIntelMode AzureFirewallThreatIntelMode `json:"threatIntelMode,omitempty"`
+}
+
+// BasicFirewallPolicyRule properties of the rule.
+type BasicFirewallPolicyRule interface {
+ AsFirewallPolicyNatRule() (*FirewallPolicyNatRule, bool)
+ AsFirewallPolicyFilterRule() (*FirewallPolicyFilterRule, bool)
+ AsFirewallPolicyRule() (*FirewallPolicyRule, bool)
+}
+
+// FirewallPolicyRule properties of the rule.
+type FirewallPolicyRule struct {
+ // Name - Name of the rule.
+ Name *string `json:"name,omitempty"`
+ // Priority - Priority of the Firewall Policy Rule resource.
+ Priority *int32 `json:"priority,omitempty"`
+ // RuleType - Possible values include: 'RuleTypeFirewallPolicyRule', 'RuleTypeFirewallPolicyNatRule', 'RuleTypeFirewallPolicyFilterRule'
+ RuleType RuleType `json:"ruleType,omitempty"`
+}
+
+func unmarshalBasicFirewallPolicyRule(body []byte) (BasicFirewallPolicyRule, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["ruleType"] {
+ case string(RuleTypeFirewallPolicyNatRule):
+ var fpnr FirewallPolicyNatRule
+ err := json.Unmarshal(body, &fpnr)
+ return fpnr, err
+ case string(RuleTypeFirewallPolicyFilterRule):
+ var fpfr FirewallPolicyFilterRule
+ err := json.Unmarshal(body, &fpfr)
+ return fpfr, err
+ default:
+ var fpr FirewallPolicyRule
+ err := json.Unmarshal(body, &fpr)
+ return fpr, err
+ }
+}
+func unmarshalBasicFirewallPolicyRuleArray(body []byte) ([]BasicFirewallPolicyRule, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ fprArray := make([]BasicFirewallPolicyRule, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ fpr, err := unmarshalBasicFirewallPolicyRule(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ fprArray[index] = fpr
+ }
+ return fprArray, nil
+}
+
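// Illustrative usage sketch (not part of the generated code): once a payload has been decoded by
// unmarshalBasicFirewallPolicyRule above, callers recover the concrete type through the As*
// helpers instead of a type switch. The helper name is hypothetical and fmt is assumed to be
// imported.
func describeRules(rules []BasicFirewallPolicyRule) {
	for _, r := range rules {
		if nat, ok := r.AsFirewallPolicyNatRule(); ok {
			fmt.Printf("NAT rule %s -> %s:%s\n",
				to.String(nat.Name), to.String(nat.TranslatedAddress), to.String(nat.TranslatedPort))
			continue
		}
		if filter, ok := r.AsFirewallPolicyFilterRule(); ok {
			fmt.Printf("filter rule %s\n", to.String(filter.Name))
			continue
		}
		if base, ok := r.AsFirewallPolicyRule(); ok {
			// Unrecognized ruleType values fall back to the base FirewallPolicyRule.
			fmt.Printf("rule %s (ruleType %q)\n", to.String(base.Name), base.RuleType)
		}
	}
}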
+// MarshalJSON is the custom marshaler for FirewallPolicyRule.
+func (fpr FirewallPolicyRule) MarshalJSON() ([]byte, error) {
+ fpr.RuleType = RuleTypeFirewallPolicyRule
+ objectMap := make(map[string]interface{})
+ if fpr.Name != nil {
+ objectMap["name"] = fpr.Name
+ }
+ if fpr.Priority != nil {
+ objectMap["priority"] = fpr.Priority
+ }
+ if fpr.RuleType != "" {
+ objectMap["ruleType"] = fpr.RuleType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsFirewallPolicyNatRule is the BasicFirewallPolicyRule implementation for FirewallPolicyRule.
+func (fpr FirewallPolicyRule) AsFirewallPolicyNatRule() (*FirewallPolicyNatRule, bool) {
+ return nil, false
+}
+
+// AsFirewallPolicyFilterRule is the BasicFirewallPolicyRule implementation for FirewallPolicyRule.
+func (fpr FirewallPolicyRule) AsFirewallPolicyFilterRule() (*FirewallPolicyFilterRule, bool) {
+ return nil, false
+}
+
+// AsFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyRule.
+func (fpr FirewallPolicyRule) AsFirewallPolicyRule() (*FirewallPolicyRule, bool) {
+ return &fpr, true
+}
+
+// AsBasicFirewallPolicyRule is the BasicFirewallPolicyRule implementation for FirewallPolicyRule.
+func (fpr FirewallPolicyRule) AsBasicFirewallPolicyRule() (BasicFirewallPolicyRule, bool) {
+ return &fpr, true
+}
+
+// BasicFirewallPolicyRuleCondition properties of a rule.
+type BasicFirewallPolicyRuleCondition interface {
+ AsApplicationRuleCondition() (*ApplicationRuleCondition, bool)
+ AsRuleCondition() (*RuleCondition, bool)
+ AsFirewallPolicyRuleCondition() (*FirewallPolicyRuleCondition, bool)
+}
+
+// FirewallPolicyRuleCondition properties of a rule.
+type FirewallPolicyRuleCondition struct {
+ // Name - Name of the rule condition.
+ Name *string `json:"name,omitempty"`
+ // Description - Description of the rule condition.
+ Description *string `json:"description,omitempty"`
+ // RuleConditionType - Possible values include: 'RuleConditionTypeFirewallPolicyRuleCondition', 'RuleConditionTypeApplicationRuleCondition', 'RuleConditionTypeNetworkRuleCondition'
+ RuleConditionType RuleConditionType `json:"ruleConditionType,omitempty"`
+}
+
+func unmarshalBasicFirewallPolicyRuleCondition(body []byte) (BasicFirewallPolicyRuleCondition, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["ruleConditionType"] {
+ case string(RuleConditionTypeApplicationRuleCondition):
+ var arc ApplicationRuleCondition
+ err := json.Unmarshal(body, &arc)
+ return arc, err
+ case string(RuleConditionTypeNetworkRuleCondition):
+ var rc RuleCondition
+ err := json.Unmarshal(body, &rc)
+ return rc, err
+ default:
+ var fprc FirewallPolicyRuleCondition
+ err := json.Unmarshal(body, &fprc)
+ return fprc, err
+ }
+}
+func unmarshalBasicFirewallPolicyRuleConditionArray(body []byte) ([]BasicFirewallPolicyRuleCondition, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ fprcArray := make([]BasicFirewallPolicyRuleCondition, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ fprc, err := unmarshalBasicFirewallPolicyRuleCondition(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ fprcArray[index] = fprc
+ }
+ return fprcArray, nil
+}
+
+// MarshalJSON is the custom marshaler for FirewallPolicyRuleCondition.
+func (fprc FirewallPolicyRuleCondition) MarshalJSON() ([]byte, error) {
+ fprc.RuleConditionType = RuleConditionTypeFirewallPolicyRuleCondition
+ objectMap := make(map[string]interface{})
+ if fprc.Name != nil {
+ objectMap["name"] = fprc.Name
+ }
+ if fprc.Description != nil {
+ objectMap["description"] = fprc.Description
+ }
+ if fprc.RuleConditionType != "" {
+ objectMap["ruleConditionType"] = fprc.RuleConditionType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsApplicationRuleCondition is the BasicFirewallPolicyRuleCondition implementation for FirewallPolicyRuleCondition.
+func (fprc FirewallPolicyRuleCondition) AsApplicationRuleCondition() (*ApplicationRuleCondition, bool) {
+ return nil, false
+}
+
+// AsRuleCondition is the BasicFirewallPolicyRuleCondition implementation for FirewallPolicyRuleCondition.
+func (fprc FirewallPolicyRuleCondition) AsRuleCondition() (*RuleCondition, bool) {
+ return nil, false
+}
+
+// AsFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for FirewallPolicyRuleCondition.
+func (fprc FirewallPolicyRuleCondition) AsFirewallPolicyRuleCondition() (*FirewallPolicyRuleCondition, bool) {
+ return &fprc, true
+}
+
+// AsBasicFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for FirewallPolicyRuleCondition.
+func (fprc FirewallPolicyRuleCondition) AsBasicFirewallPolicyRuleCondition() (BasicFirewallPolicyRuleCondition, bool) {
+ return &fprc, true
+}
+
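// Illustrative sketch (not part of the generated code): the default branch of
// unmarshalBasicFirewallPolicyRuleCondition above means a payload with a missing or unknown
// ruleConditionType still decodes, but only into the base FirewallPolicyRuleCondition, which the
// As* helpers above then report. The payload and helper name below are hypothetical.
func exampleUnknownConditionType() (BasicFirewallPolicyRuleCondition, error) {
	payload := []byte(`{"name":"legacy-condition","description":"no discriminator present"}`)
	cond, err := unmarshalBasicFirewallPolicyRuleCondition(payload)
	if err != nil {
		return nil, err
	}
	// AsFirewallPolicyRuleCondition succeeds here; AsRuleCondition and
	// AsApplicationRuleCondition return false for this value.
	if base, ok := cond.AsFirewallPolicyRuleCondition(); ok {
		_ = base.Description // "no discriminator present"
	}
	return cond, nil
}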
+// FirewallPolicyRuleConditionApplicationProtocol properties of the application rule protocol.
+type FirewallPolicyRuleConditionApplicationProtocol struct {
+ // ProtocolType - Protocol type. Possible values include: 'FirewallPolicyRuleConditionApplicationProtocolTypeHTTP', 'FirewallPolicyRuleConditionApplicationProtocolTypeHTTPS'
+ ProtocolType FirewallPolicyRuleConditionApplicationProtocolType `json:"protocolType,omitempty"`
+ // Port - Port number for the protocol, cannot be greater than 64000.
+ Port *int32 `json:"port,omitempty"`
+}
+
+// FirewallPolicyRuleGroup rule Group resource.
+type FirewallPolicyRuleGroup struct {
+ autorest.Response `json:"-"`
+ // FirewallPolicyRuleGroupProperties - The properties of the firewall policy rule group.
+ *FirewallPolicyRuleGroupProperties `json:"properties,omitempty"`
+ // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource.
+ Name *string `json:"name,omitempty"`
+ // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
+ // Type - READ-ONLY; Rule Group type.
+ Type *string `json:"type,omitempty"`
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FirewallPolicyRuleGroup.
+func (fprg FirewallPolicyRuleGroup) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fprg.FirewallPolicyRuleGroupProperties != nil {
+ objectMap["properties"] = fprg.FirewallPolicyRuleGroupProperties
+ }
+ if fprg.Name != nil {
+ objectMap["name"] = fprg.Name
+ }
+ if fprg.ID != nil {
+ objectMap["id"] = fprg.ID
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FirewallPolicyRuleGroup struct.
+func (fprg *FirewallPolicyRuleGroup) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var firewallPolicyRuleGroupProperties FirewallPolicyRuleGroupProperties
+ err = json.Unmarshal(*v, &firewallPolicyRuleGroupProperties)
+ if err != nil {
+ return err
+ }
+ fprg.FirewallPolicyRuleGroupProperties = &firewallPolicyRuleGroupProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fprg.Name = &name
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ fprg.Etag = &etag
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fprg.Type = &typeVar
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fprg.ID = &ID
+ }
+ }
+ }
+
+ return nil
+}
+
+// FirewallPolicyRuleGroupListResult response for ListFirewallPolicyRuleGroups API service call.
+type FirewallPolicyRuleGroupListResult struct {
+ autorest.Response `json:"-"`
+ // Value - List of FirewallPolicyRuleGroups in a FirewallPolicy.
+ Value *[]FirewallPolicyRuleGroup `json:"value,omitempty"`
+ // NextLink - URL to get the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// FirewallPolicyRuleGroupListResultIterator provides access to a complete listing of
+// FirewallPolicyRuleGroup values.
+type FirewallPolicyRuleGroupListResultIterator struct {
i int
- page ExpressRouteServiceProviderListResultPage
+ page FirewallPolicyRuleGroupListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *ExpressRouteServiceProviderListResultIterator) NextWithContext(ctx context.Context) (err error) {
+func (iter *FirewallPolicyRuleGroupListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProviderListResultIterator.NextWithContext")
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
@@ -13262,62 +14487,62 @@ func (iter *ExpressRouteServiceProviderListResultIterator) NextWithContext(ctx c
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
-func (iter *ExpressRouteServiceProviderListResultIterator) Next() error {
+func (iter *FirewallPolicyRuleGroupListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter ExpressRouteServiceProviderListResultIterator) NotDone() bool {
+func (iter FirewallPolicyRuleGroupListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
-func (iter ExpressRouteServiceProviderListResultIterator) Response() ExpressRouteServiceProviderListResult {
+func (iter FirewallPolicyRuleGroupListResultIterator) Response() FirewallPolicyRuleGroupListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
-func (iter ExpressRouteServiceProviderListResultIterator) Value() ExpressRouteServiceProvider {
+func (iter FirewallPolicyRuleGroupListResultIterator) Value() FirewallPolicyRuleGroup {
if !iter.page.NotDone() {
- return ExpressRouteServiceProvider{}
+ return FirewallPolicyRuleGroup{}
}
return iter.page.Values()[iter.i]
}
-// Creates a new instance of the ExpressRouteServiceProviderListResultIterator type.
-func NewExpressRouteServiceProviderListResultIterator(page ExpressRouteServiceProviderListResultPage) ExpressRouteServiceProviderListResultIterator {
- return ExpressRouteServiceProviderListResultIterator{page: page}
+// Creates a new instance of the FirewallPolicyRuleGroupListResultIterator type.
+func NewFirewallPolicyRuleGroupListResultIterator(page FirewallPolicyRuleGroupListResultPage) FirewallPolicyRuleGroupListResultIterator {
+ return FirewallPolicyRuleGroupListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
-func (ersplr ExpressRouteServiceProviderListResult) IsEmpty() bool {
- return ersplr.Value == nil || len(*ersplr.Value) == 0
+func (fprglr FirewallPolicyRuleGroupListResult) IsEmpty() bool {
+ return fprglr.Value == nil || len(*fprglr.Value) == 0
}
-// expressRouteServiceProviderListResultPreparer prepares a request to retrieve the next set of results.
+// firewallPolicyRuleGroupListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (ersplr ExpressRouteServiceProviderListResult) expressRouteServiceProviderListResultPreparer(ctx context.Context) (*http.Request, error) {
- if ersplr.NextLink == nil || len(to.String(ersplr.NextLink)) < 1 {
+func (fprglr FirewallPolicyRuleGroupListResult) firewallPolicyRuleGroupListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if fprglr.NextLink == nil || len(to.String(fprglr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
- autorest.WithBaseURL(to.String(ersplr.NextLink)))
+ autorest.WithBaseURL(to.String(fprglr.NextLink)))
}
-// ExpressRouteServiceProviderListResultPage contains a page of ExpressRouteServiceProvider values.
-type ExpressRouteServiceProviderListResultPage struct {
- fn func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error)
- ersplr ExpressRouteServiceProviderListResult
+// FirewallPolicyRuleGroupListResultPage contains a page of FirewallPolicyRuleGroup values.
+type FirewallPolicyRuleGroupListResultPage struct {
+ fn func(context.Context, FirewallPolicyRuleGroupListResult) (FirewallPolicyRuleGroupListResult, error)
+ fprglr FirewallPolicyRuleGroupListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *ExpressRouteServiceProviderListResultPage) NextWithContext(ctx context.Context) (err error) {
+func (page *FirewallPolicyRuleGroupListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProviderListResultPage.NextWithContext")
+ ctx = tracing.StartSpan(ctx, fqdn+"/FirewallPolicyRuleGroupListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
@@ -13326,52 +14551,145 @@ func (page *ExpressRouteServiceProviderListResultPage) NextWithContext(ctx conte
tracing.EndSpan(ctx, sc, err)
}()
}
- next, err := page.fn(ctx, page.ersplr)
+ next, err := page.fn(ctx, page.fprglr)
if err != nil {
return err
}
- page.ersplr = next
+ page.fprglr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
-func (page *ExpressRouteServiceProviderListResultPage) Next() error {
+func (page *FirewallPolicyRuleGroupListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page ExpressRouteServiceProviderListResultPage) NotDone() bool {
- return !page.ersplr.IsEmpty()
+func (page FirewallPolicyRuleGroupListResultPage) NotDone() bool {
+ return !page.fprglr.IsEmpty()
}
// Response returns the raw server response from the last page request.
-func (page ExpressRouteServiceProviderListResultPage) Response() ExpressRouteServiceProviderListResult {
- return page.ersplr
+func (page FirewallPolicyRuleGroupListResultPage) Response() FirewallPolicyRuleGroupListResult {
+ return page.fprglr
}
// Values returns the slice of values for the current page or nil if there are no values.
-func (page ExpressRouteServiceProviderListResultPage) Values() []ExpressRouteServiceProvider {
- if page.ersplr.IsEmpty() {
+func (page FirewallPolicyRuleGroupListResultPage) Values() []FirewallPolicyRuleGroup {
+ if page.fprglr.IsEmpty() {
return nil
}
- return *page.ersplr.Value
+ return *page.fprglr.Value
}
-// Creates a new instance of the ExpressRouteServiceProviderListResultPage type.
-func NewExpressRouteServiceProviderListResultPage(getNextPage func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error)) ExpressRouteServiceProviderListResultPage {
- return ExpressRouteServiceProviderListResultPage{fn: getNextPage}
+// Creates a new instance of the FirewallPolicyRuleGroupListResultPage type.
+func NewFirewallPolicyRuleGroupListResultPage(getNextPage func(context.Context, FirewallPolicyRuleGroupListResult) (FirewallPolicyRuleGroupListResult, error)) FirewallPolicyRuleGroupListResultPage {
+ return FirewallPolicyRuleGroupListResultPage{fn: getNextPage}
}
-// ExpressRouteServiceProviderPropertiesFormat properties of ExpressRouteServiceProvider.
-type ExpressRouteServiceProviderPropertiesFormat struct {
- // PeeringLocations - Get a list of peering locations.
- PeeringLocations *[]string `json:"peeringLocations,omitempty"`
- // BandwidthsOffered - Gets bandwidths offered.
- BandwidthsOffered *[]ExpressRouteServiceProviderBandwidthsOffered `json:"bandwidthsOffered,omitempty"`
- // ProvisioningState - Gets the provisioning state of the resource.
- ProvisioningState *string `json:"provisioningState,omitempty"`
+// FirewallPolicyRuleGroupProperties properties of the rule group.
+type FirewallPolicyRuleGroupProperties struct {
+ // Priority - Priority of the Firewall Policy Rule Group resource.
+ Priority *int32 `json:"priority,omitempty"`
+ // Rules - Group of Firewall Policy rules.
+ Rules *[]BasicFirewallPolicyRule `json:"rules,omitempty"`
+ // ProvisioningState - The provisioning state of the resource. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+}
+
+// UnmarshalJSON is the custom unmarshaler for FirewallPolicyRuleGroupProperties struct.
+func (fprgp *FirewallPolicyRuleGroupProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "priority":
+ if v != nil {
+ var priority int32
+ err = json.Unmarshal(*v, &priority)
+ if err != nil {
+ return err
+ }
+ fprgp.Priority = &priority
+ }
+ case "rules":
+ if v != nil {
+ rules, err := unmarshalBasicFirewallPolicyRuleArray(*v)
+ if err != nil {
+ return err
+ }
+ fprgp.Rules = &rules
+ }
+ case "provisioningState":
+ if v != nil {
+ var provisioningState ProvisioningState
+ err = json.Unmarshal(*v, &provisioningState)
+ if err != nil {
+ return err
+ }
+ fprgp.ProvisioningState = provisioningState
+ }
+ }
+ }
+
+ return nil
+}
+
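// Illustrative usage sketch (not part of the generated code): assembling a FirewallPolicyRuleGroup
// whose Rules slice carries the BasicFirewallPolicyRule interface, in the shape accepted by
// FirewallPolicyRuleGroupsClient.CreateOrUpdate. All names and values are assumptions for
// illustration only.
func exampleRuleGroup() FirewallPolicyRuleGroup {
	return FirewallPolicyRuleGroup{
		Name: to.StringPtr("default-rules"),
		FirewallPolicyRuleGroupProperties: &FirewallPolicyRuleGroupProperties{
			Priority: to.Int32Ptr(100),
			Rules: &[]BasicFirewallPolicyRule{
				FirewallPolicyNatRule{
					Name:              to.StringPtr("dnat-rule"),
					Priority:          to.Int32Ptr(200),
					Action:            &FirewallPolicyNatRuleAction{Type: DNAT},
					TranslatedAddress: to.StringPtr("10.1.0.4"),
					TranslatedPort:    to.StringPtr("443"),
				},
			},
		},
	}
}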
+// FirewallPolicyRuleGroupsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of
+// a long-running operation.
+type FirewallPolicyRuleGroupsCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *FirewallPolicyRuleGroupsCreateOrUpdateFuture) Result(client FirewallPolicyRuleGroupsClient) (fprg FirewallPolicyRuleGroup, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("network.FirewallPolicyRuleGroupsCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if fprg.Response.Response, err = future.GetResult(sender); err == nil && fprg.Response.Response.StatusCode != http.StatusNoContent {
+ fprg, err = client.CreateOrUpdateResponder(fprg.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsCreateOrUpdateFuture", "Result", fprg.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// FirewallPolicyRuleGroupsDeleteFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type FirewallPolicyRuleGroupsDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *FirewallPolicyRuleGroupsDeleteFuture) Result(client FirewallPolicyRuleGroupsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.FirewallPolicyRuleGroupsDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("network.FirewallPolicyRuleGroupsDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
}
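// Illustrative usage sketch (not part of the generated code): the future returned by
// FirewallPolicyRuleGroupsClient.CreateOrUpdate follows the usual autorest long-running operation
// pattern: block with WaitForCompletionRef, then call Result (defined above) to decode the final
// FirewallPolicyRuleGroup. The helper name and parameter values are assumptions.
func createRuleGroupAndWait(ctx context.Context, client FirewallPolicyRuleGroupsClient,
	resourceGroup, policyName, groupName string, group FirewallPolicyRuleGroup) (FirewallPolicyRuleGroup, error) {
	future, err := client.CreateOrUpdate(ctx, resourceGroup, policyName, groupName, group)
	if err != nil {
		return FirewallPolicyRuleGroup{}, err
	}
	// WaitForCompletionRef polls until the operation reaches a terminal state.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return FirewallPolicyRuleGroup{}, err
	}
	// Result re-fetches and unmarshals the created rule group.
	return future.Result(client)
}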
// FlowLogFormatParameters parameters that define the flow log format.
@@ -13653,6 +14971,14 @@ type HTTPHeader struct {
Value *string `json:"value,omitempty"`
}
+// HubIPAddresses IP addresses associated with Azure Firewall.
+type HubIPAddresses struct {
+ // PublicIPAddresses - List of public IP addresses associated with Azure Firewall.
+ PublicIPAddresses *[]AzureFirewallPublicIPAddress `json:"publicIPAddresses,omitempty"`
+ // PrivateIPAddress - Private IP address associated with Azure Firewall.
+ PrivateIPAddress *string `json:"privateIPAddress,omitempty"`
+}
+
// HubVirtualNetworkConnection hubVirtualNetworkConnection Resource.
type HubVirtualNetworkConnection struct {
autorest.Response `json:"-"`
@@ -21115,7 +22441,7 @@ type PrivateEndpoint struct {
autorest.Response `json:"-"`
// PrivateEndpointProperties - Properties of the private endpoint.
*PrivateEndpointProperties `json:"properties,omitempty"`
- // Etag - Gets a unique read-only string that changes whenever the resource is updated.
+ // Etag - A unique read-only string that changes whenever the resource is updated.
Etag *string `json:"etag,omitempty"`
// ID - Resource ID.
ID *string `json:"id,omitempty"`
@@ -21235,6 +22561,10 @@ type PrivateEndpointConnection struct {
*PrivateEndpointConnectionProperties `json:"properties,omitempty"`
// Name - The name of the resource that is unique within a resource group. This name can be used to access the resource.
Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The resource type.
+ Type *string `json:"type,omitempty"`
+ // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
// ID - Resource ID.
ID *string `json:"id,omitempty"`
}
@@ -21281,6 +22611,24 @@ func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error {
}
pec.Name = &name
}
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ pec.Type = &typeVar
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ pec.Etag = &etag
+ }
case "id":
if v != nil {
var ID string
@@ -21302,6 +22650,8 @@ type PrivateEndpointConnectionProperties struct {
PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"`
// PrivateLinkServiceConnectionState - A collection of information about the state of the connection between service consumer and provider.
PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState,omitempty"`
+ // ProvisioningState - The provisioning state of the private endpoint connection. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
}
// PrivateEndpointListResult response for the ListPrivateEndpoints API service call.
@@ -21456,8 +22806,8 @@ type PrivateEndpointProperties struct {
Subnet *Subnet `json:"subnet,omitempty"`
// NetworkInterfaces - READ-ONLY; Gets an array of references to the network interfaces created for this private endpoint.
NetworkInterfaces *[]Interface `json:"networkInterfaces,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state of the private endpoint. Possible values are: 'Updating', 'Deleting', and 'Failed'.
- ProvisioningState *string `json:"provisioningState,omitempty"`
+ // ProvisioningState - The provisioning state of the private endpoint. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
// PrivateLinkServiceConnections - A grouping of information about the connection to the remote resource.
PrivateLinkServiceConnections *[]PrivateLinkServiceConnection `json:"privateLinkServiceConnections,omitempty"`
// ManualPrivateLinkServiceConnections - A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
@@ -21521,7 +22871,7 @@ type PrivateLinkService struct {
autorest.Response `json:"-"`
// PrivateLinkServiceProperties - Properties of the private link service.
*PrivateLinkServiceProperties `json:"properties,omitempty"`
- // Etag - Gets a unique read-only string that changes whenever the resource is updated.
+ // Etag - A unique read-only string that changes whenever the resource is updated.
Etag *string `json:"etag,omitempty"`
// ID - Resource ID.
ID *string `json:"id,omitempty"`
@@ -21640,6 +22990,10 @@ type PrivateLinkServiceConnection struct {
*PrivateLinkServiceConnectionProperties `json:"properties,omitempty"`
// Name - The name of the resource that is unique within a resource group. This name can be used to access the resource.
Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The resource type.
+ Type *string `json:"type,omitempty"`
+ // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
// ID - Resource ID.
ID *string `json:"id,omitempty"`
}
@@ -21686,6 +23040,24 @@ func (plsc *PrivateLinkServiceConnection) UnmarshalJSON(body []byte) error {
}
plsc.Name = &name
}
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ plsc.Type = &typeVar
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ plsc.Etag = &etag
+ }
case "id":
if v != nil {
var ID string
@@ -21703,6 +23075,8 @@ func (plsc *PrivateLinkServiceConnection) UnmarshalJSON(body []byte) error {
// PrivateLinkServiceConnectionProperties properties of the PrivateLinkServiceConnection.
type PrivateLinkServiceConnectionProperties struct {
+ // ProvisioningState - The provisioning state of the private link service connection. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
// PrivateLinkServiceID - The resource id of private link service.
PrivateLinkServiceID *string `json:"privateLinkServiceId,omitempty"`
// GroupIds - The ID(s) of the group(s) obtained from the remote resource that this private endpoint should connect to.
@@ -21730,6 +23104,12 @@ type PrivateLinkServiceIPConfiguration struct {
*PrivateLinkServiceIPConfigurationProperties `json:"properties,omitempty"`
// Name - The name of private link service ip configuration.
Name *string `json:"name,omitempty"`
+ // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
+ // Type - READ-ONLY; The resource type.
+ Type *string `json:"type,omitempty"`
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
}
// MarshalJSON is the custom marshaler for PrivateLinkServiceIPConfiguration.
@@ -21741,6 +23121,9 @@ func (plsic PrivateLinkServiceIPConfiguration) MarshalJSON() ([]byte, error) {
if plsic.Name != nil {
objectMap["name"] = plsic.Name
}
+ if plsic.ID != nil {
+ objectMap["id"] = plsic.ID
+ }
return json.Marshal(objectMap)
}
@@ -21771,6 +23154,33 @@ func (plsic *PrivateLinkServiceIPConfiguration) UnmarshalJSON(body []byte) error
}
plsic.Name = &name
}
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ plsic.Etag = &etag
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ plsic.Type = &typeVar
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ plsic.ID = &ID
+ }
}
}
@@ -21785,10 +23195,10 @@ type PrivateLinkServiceIPConfigurationProperties struct {
PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"`
// Subnet - The reference of the subnet resource.
Subnet *Subnet `json:"subnet,omitempty"`
- // PublicIPAddress - The reference of the public IP resource.
- PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"`
- // ProvisioningState - Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
- ProvisioningState *string `json:"provisioningState,omitempty"`
+ // Primary - Whether the ip configuration is primary or not.
+ Primary *bool `json:"primary,omitempty"`
+ // ProvisioningState - The provisioning state of the private link service ip configuration. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
// PrivateIPAddressVersion - Available from Api-Version 2016-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values include: 'IPv4', 'IPv6'
PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"`
}
@@ -21947,8 +23357,8 @@ type PrivateLinkServiceProperties struct {
IPConfigurations *[]PrivateLinkServiceIPConfiguration `json:"ipConfigurations,omitempty"`
// NetworkInterfaces - READ-ONLY; Gets an array of references to the network interfaces created for this private link service.
NetworkInterfaces *[]Interface `json:"networkInterfaces,omitempty"`
- // ProvisioningState - READ-ONLY; The provisioning state of the private link service. Possible values are: 'Updating', 'Succeeded', and 'Failed'.
- ProvisioningState *string `json:"provisioningState,omitempty"`
+ // ProvisioningState - The provisioning state of the private link service. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
// PrivateEndpointConnections - An array of list about connections to the private endpoint.
PrivateEndpointConnections *[]PrivateEndpointConnection `json:"privateEndpointConnections,omitempty"`
// Visibility - The visibility list of the private link service.
@@ -23283,6 +24693,8 @@ type PublicIPPrefixPropertiesFormat struct {
IPPrefix *string `json:"ipPrefix,omitempty"`
// PublicIPAddresses - The list of all referenced PublicIPAddresses.
PublicIPAddresses *[]ReferencedPublicIPAddress `json:"publicIPAddresses,omitempty"`
+ // LoadBalancerFrontendIPConfiguration - READ-ONLY; The reference to load balancer frontend IP configuration associated with the public IP prefix.
+ LoadBalancerFrontendIPConfiguration *SubResource `json:"loadBalancerFrontendIpConfiguration,omitempty"`
// ResourceGUID - The resource GUID property of the public IP prefix resource.
ResourceGUID *string `json:"resourceGuid,omitempty"`
// ProvisioningState - The provisioning state of the Public IP prefix resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
@@ -24792,6 +26204,72 @@ func (future *RouteTablesUpdateTagsFuture) Result(client RouteTablesClient) (rt
return
}
+// RuleCondition rule condition of type network.
+type RuleCondition struct {
+ // IPProtocols - Array of FirewallPolicyRuleConditionNetworkProtocols.
+ IPProtocols *[]FirewallPolicyRuleConditionNetworkProtocol `json:"ipProtocols,omitempty"`
+ // SourceAddresses - List of source IP addresses for this rule.
+ SourceAddresses *[]string `json:"sourceAddresses,omitempty"`
+ // DestinationAddresses - List of destination IP addresses or Service Tags.
+ DestinationAddresses *[]string `json:"destinationAddresses,omitempty"`
+ // DestinationPorts - List of destination ports.
+ DestinationPorts *[]string `json:"destinationPorts,omitempty"`
+ // Name - Name of the rule condition.
+ Name *string `json:"name,omitempty"`
+ // Description - Description of the rule condition.
+ Description *string `json:"description,omitempty"`
+ // RuleConditionType - Possible values include: 'RuleConditionTypeFirewallPolicyRuleCondition', 'RuleConditionTypeApplicationRuleCondition', 'RuleConditionTypeNetworkRuleCondition'
+ RuleConditionType RuleConditionType `json:"ruleConditionType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for RuleCondition.
+func (rc RuleCondition) MarshalJSON() ([]byte, error) {
+ rc.RuleConditionType = RuleConditionTypeNetworkRuleCondition
+ objectMap := make(map[string]interface{})
+ if rc.IPProtocols != nil {
+ objectMap["ipProtocols"] = rc.IPProtocols
+ }
+ if rc.SourceAddresses != nil {
+ objectMap["sourceAddresses"] = rc.SourceAddresses
+ }
+ if rc.DestinationAddresses != nil {
+ objectMap["destinationAddresses"] = rc.DestinationAddresses
+ }
+ if rc.DestinationPorts != nil {
+ objectMap["destinationPorts"] = rc.DestinationPorts
+ }
+ if rc.Name != nil {
+ objectMap["name"] = rc.Name
+ }
+ if rc.Description != nil {
+ objectMap["description"] = rc.Description
+ }
+ if rc.RuleConditionType != "" {
+ objectMap["ruleConditionType"] = rc.RuleConditionType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsApplicationRuleCondition is the BasicFirewallPolicyRuleCondition implementation for RuleCondition.
+func (rc RuleCondition) AsApplicationRuleCondition() (*ApplicationRuleCondition, bool) {
+ return nil, false
+}
+
+// AsRuleCondition is the BasicFirewallPolicyRuleCondition implementation for RuleCondition.
+func (rc RuleCondition) AsRuleCondition() (*RuleCondition, bool) {
+ return &rc, true
+}
+
+// AsFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for RuleCondition.
+func (rc RuleCondition) AsFirewallPolicyRuleCondition() (*FirewallPolicyRuleCondition, bool) {
+ return nil, false
+}
+
+// AsBasicFirewallPolicyRuleCondition is the BasicFirewallPolicyRuleCondition implementation for RuleCondition.
+func (rc RuleCondition) AsBasicFirewallPolicyRuleCondition() (BasicFirewallPolicyRuleCondition, bool) {
+ return &rc, true
+}
+
// SecurityGroup networkSecurityGroup resource.
type SecurityGroup struct {
autorest.Response `json:"-"`
@@ -26709,6 +28187,29 @@ func (future *SubnetsPrepareNetworkPoliciesFuture) Result(client SubnetsClient)
return
}
+// SubnetsUnprepareNetworkPoliciesFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type SubnetsUnprepareNetworkPoliciesFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *SubnetsUnprepareNetworkPoliciesFuture) Result(client SubnetsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.SubnetsUnprepareNetworkPoliciesFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("network.SubnetsUnprepareNetworkPoliciesFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
// SubResource reference to another subresource.
type SubResource struct {
// ID - Resource ID.
@@ -26909,6 +28410,12 @@ type TunnelConnectionHealth struct {
LastConnectionEstablishedUtcTime *string `json:"lastConnectionEstablishedUtcTime,omitempty"`
}
+// UnprepareNetworkPoliciesRequest details of UnprepareNetworkPolicies for Subnet.
+type UnprepareNetworkPoliciesRequest struct {
+ // ServiceName - The name of the service for which the subnet is being unprepared.
+ ServiceName *string `json:"serviceName,omitempty"`
+}
+
// Usage describes network resource usage.
type Usage struct {
// ID - READ-ONLY; Resource identifier.
diff --git a/services/network/mgmt/2019-06-01/network/networkapi/interfaces.go b/services/network/mgmt/2019-06-01/network/networkapi/interfaces.go
index 491139b4c126..939405bdfd87 100644
--- a/services/network/mgmt/2019-06-01/network/networkapi/interfaces.go
+++ b/services/network/mgmt/2019-06-01/network/networkapi/interfaces.go
@@ -273,6 +273,28 @@ type ExpressRouteLinksClientAPI interface {
var _ ExpressRouteLinksClientAPI = (*network.ExpressRouteLinksClient)(nil)
+// FirewallPoliciesClientAPI contains the set of methods on the FirewallPoliciesClient type.
+type FirewallPoliciesClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, firewallPolicyName string, parameters network.FirewallPolicy) (result network.FirewallPoliciesCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, firewallPolicyName string) (result network.FirewallPoliciesDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, firewallPolicyName string, expand string) (result network.FirewallPolicy, err error)
+ List(ctx context.Context, resourceGroupName string) (result network.FirewallPolicyListResultPage, err error)
+ ListAll(ctx context.Context) (result network.FirewallPolicyListResultPage, err error)
+ UpdateTags(ctx context.Context, resourceGroupName string, firewallPolicyName string, firewallPolicyParameters network.TagsObject) (result network.FirewallPolicy, err error)
+}
+
+var _ FirewallPoliciesClientAPI = (*network.FirewallPoliciesClient)(nil)
+
+// FirewallPolicyRuleGroupsClientAPI contains the set of methods on the FirewallPolicyRuleGroupsClient type.
+type FirewallPolicyRuleGroupsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string, parameters network.FirewallPolicyRuleGroup) (result network.FirewallPolicyRuleGroupsCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string) (result network.FirewallPolicyRuleGroupsDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, firewallPolicyName string, ruleGroupName string) (result network.FirewallPolicyRuleGroup, err error)
+ List(ctx context.Context, resourceGroupName string, firewallPolicyName string) (result network.FirewallPolicyRuleGroupListResultPage, err error)
+}
+
+var _ FirewallPolicyRuleGroupsClientAPI = (*network.FirewallPolicyRuleGroupsClient)(nil)
+
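// Illustrative sketch (not part of the generated code): these ClientAPI interfaces let consumers
// depend on an abstraction and inject fakes in unit tests instead of the concrete clients. The
// fake below embeds the interface and overrides only Get; the names and the surrounding test
// package are hypothetical, and imports of this networkapi package and the parent network package
// are assumed.
type fakeFirewallPolicies struct {
	networkapi.FirewallPoliciesClientAPI // unimplemented methods would panic if called
	policy network.FirewallPolicy
}

func (f fakeFirewallPolicies) Get(ctx context.Context, resourceGroupName string, firewallPolicyName string, expand string) (network.FirewallPolicy, error) {
	return f.policy, nil
}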
// LoadBalancersClientAPI contains the set of methods on the LoadBalancersClient type.
type LoadBalancersClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer) (result network.LoadBalancersCreateOrUpdateFuture, err error)
@@ -668,6 +690,7 @@ type SubnetsClientAPI interface {
Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error)
List(ctx context.Context, resourceGroupName string, virtualNetworkName string) (result network.SubnetListResultPage, err error)
PrepareNetworkPolicies(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, prepareNetworkPoliciesRequestParameters network.PrepareNetworkPoliciesRequest) (result network.SubnetsPrepareNetworkPoliciesFuture, err error)
+ UnprepareNetworkPolicies(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, unprepareNetworkPoliciesRequestParameters network.UnprepareNetworkPoliciesRequest) (result network.SubnetsUnprepareNetworkPoliciesFuture, err error)
}
var _ SubnetsClientAPI = (*network.SubnetsClient)(nil)
diff --git a/services/network/mgmt/2019-06-01/network/privatelinkservices.go b/services/network/mgmt/2019-06-01/network/privatelinkservices.go
index 4c31537a0f2b..98ed330c4f6c 100644
--- a/services/network/mgmt/2019-06-01/network/privatelinkservices.go
+++ b/services/network/mgmt/2019-06-01/network/privatelinkservices.go
@@ -1019,6 +1019,8 @@ func (client PrivateLinkServicesClient) UpdatePrivateEndpointConnectionPreparer(
"api-version": APIVersion,
}
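+ // Type and Etag are READ-ONLY on the connection resource; clear them so they are not serialized into the PUT body.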
+ parameters.Type = nil
+ parameters.Etag = nil
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
diff --git a/services/network/mgmt/2019-06-01/network/servicetags.go b/services/network/mgmt/2019-06-01/network/servicetags.go
index 962c21b801e9..f7d39cf55650 100644
--- a/services/network/mgmt/2019-06-01/network/servicetags.go
+++ b/services/network/mgmt/2019-06-01/network/servicetags.go
@@ -42,7 +42,9 @@ func NewServiceTagsClientWithBaseURI(baseURI string, subscriptionID string) Serv
// List gets a list of service tag information resources.
// Parameters:
-// location - the location.
+// location - the location that will be used as a reference for version (not as a filter based on location; you
+// will get the list of service tags with prefix details across all regions, but limited to the cloud that your
+// subscription belongs to).
func (client ServiceTagsClient) List(ctx context.Context, location string) (result ServiceTagsListResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ServiceTagsClient.List")
diff --git a/services/network/mgmt/2019-06-01/network/subnets.go b/services/network/mgmt/2019-06-01/network/subnets.go
index ec9af9af3a99..a2f917cad06c 100644
--- a/services/network/mgmt/2019-06-01/network/subnets.go
+++ b/services/network/mgmt/2019-06-01/network/subnets.go
@@ -479,3 +479,85 @@ func (client SubnetsClient) PrepareNetworkPoliciesResponder(resp *http.Response)
result.Response = resp
return
}
+
+// UnprepareNetworkPolicies unprepares a subnet by removing network intent policies.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// virtualNetworkName - the name of the virtual network.
+// subnetName - the name of the subnet.
+// unprepareNetworkPoliciesRequestParameters - parameters supplied to unprepare the subnet and remove network
+// intent policies.
+func (client SubnetsClient) UnprepareNetworkPolicies(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, unprepareNetworkPoliciesRequestParameters UnprepareNetworkPoliciesRequest) (result SubnetsUnprepareNetworkPoliciesFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SubnetsClient.UnprepareNetworkPolicies")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UnprepareNetworkPoliciesPreparer(ctx, resourceGroupName, virtualNetworkName, subnetName, unprepareNetworkPoliciesRequestParameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.SubnetsClient", "UnprepareNetworkPolicies", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.UnprepareNetworkPoliciesSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "network.SubnetsClient", "UnprepareNetworkPolicies", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// UnprepareNetworkPoliciesPreparer prepares the UnprepareNetworkPolicies request.
+func (client SubnetsClient) UnprepareNetworkPoliciesPreparer(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, unprepareNetworkPoliciesRequestParameters UnprepareNetworkPoliciesRequest) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subnetName": autorest.Encode("path", subnetName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "virtualNetworkName": autorest.Encode("path", virtualNetworkName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/UnprepareNetworkPolicies", pathParameters),
+ autorest.WithJSON(unprepareNetworkPoliciesRequestParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UnprepareNetworkPoliciesSender sends the UnprepareNetworkPolicies request. The method will close the
+// http.Response Body if it receives an error.
+func (client SubnetsClient) UnprepareNetworkPoliciesSender(req *http.Request) (future SubnetsUnprepareNetworkPoliciesFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// UnprepareNetworkPoliciesResponder handles the response to the UnprepareNetworkPolicies request. The method always
+// closes the http.Response Body.
+func (client SubnetsClient) UnprepareNetworkPoliciesResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
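// Illustrative usage sketch (not part of the generated code): driving the new
// UnprepareNetworkPolicies operation end to end. The helper name and service name value are
// hypothetical; the long-running operation is awaited with the standard WaitForCompletionRef
// pattern.
func unprepareSubnetPolicies(ctx context.Context, client SubnetsClient, resourceGroup, vnetName, subnetName, serviceName string) error {
	params := UnprepareNetworkPoliciesRequest{ServiceName: &serviceName}
	future, err := client.UnprepareNetworkPolicies(ctx, resourceGroup, vnetName, subnetName, params)
	if err != nil {
		return err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	// The operation returns no body; Result surfaces only the final autorest.Response.
	_, err = future.Result(client)
	return err
}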
diff --git a/services/network/mgmt/2019-06-01/network/virtualnetworkgatewayconnections.go b/services/network/mgmt/2019-06-01/network/virtualnetworkgatewayconnections.go
index 359518760166..7875cf5ea19d 100644
--- a/services/network/mgmt/2019-06-01/network/virtualnetworkgatewayconnections.go
+++ b/services/network/mgmt/2019-06-01/network/virtualnetworkgatewayconnections.go
@@ -734,7 +734,7 @@ func (client VirtualNetworkGatewayConnectionsClient) UpdateTagsResponder(resp *h
err = autorest.Respond(
resp,
client.ByInspecting(),
- azure.WithErrorUnlessStatusCode(http.StatusOK),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
diff --git a/services/network/mgmt/2019-06-01/network/virtualnetworkgateways.go b/services/network/mgmt/2019-06-01/network/virtualnetworkgateways.go
index 97e2ee419572..cd31f47b6331 100644
--- a/services/network/mgmt/2019-06-01/network/virtualnetworkgateways.go
+++ b/services/network/mgmt/2019-06-01/network/virtualnetworkgateways.go
@@ -1542,7 +1542,7 @@ func (client VirtualNetworkGatewaysClient) UpdateTagsResponder(resp *http.Respon
err = autorest.Respond(
resp,
client.ByInspecting(),
- azure.WithErrorUnlessStatusCode(http.StatusOK),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
diff --git a/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/actionrules.go b/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/actionrules.go
index 491d52c4c3eb..4f649164c364 100644
--- a/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/actionrules.go
+++ b/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/actionrules.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -56,6 +57,12 @@ func (client ActionRulesClient) CreateUpdate(ctx context.Context, resourceGroupN
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.ActionRulesClient", "CreateUpdate", err.Error())
+ }
+
req, err := client.CreateUpdatePreparer(ctx, resourceGroupName, actionRuleName, actionRule)
if err != nil {
err = autorest.NewErrorWithError(err, "alertsmanagement.ActionRulesClient", "CreateUpdate", nil, "Failure preparing request")
@@ -135,6 +142,12 @@ func (client ActionRulesClient) Delete(ctx context.Context, resourceGroupName st
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.ActionRulesClient", "Delete", err.Error())
+ }
+
req, err := client.DeletePreparer(ctx, resourceGroupName, actionRuleName)
if err != nil {
err = autorest.NewErrorWithError(err, "alertsmanagement.ActionRulesClient", "Delete", nil, "Failure preparing request")
@@ -212,6 +225,12 @@ func (client ActionRulesClient) GetByName(ctx context.Context, resourceGroupName
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.ActionRulesClient", "GetByName", err.Error())
+ }
+
req, err := client.GetByNamePreparer(ctx, resourceGroupName, actionRuleName)
if err != nil {
err = autorest.NewErrorWithError(err, "alertsmanagement.ActionRulesClient", "GetByName", nil, "Failure preparing request")
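// Illustrative sketch (not part of the generated code): with the validation added above, a client
// constructed with an empty subscription ID now fails fast, before any request is prepared or
// sent. The helper and resource names below are hypothetical.
func exampleSubscriptionIDGuard(ctx context.Context) error {
	client := NewActionRulesClient("") // empty subscription ID
	_, err := client.GetByName(ctx, "my-resource-group", "my-action-rule")
	// err is a validation error referencing alertsmanagement.ActionRulesClient/GetByName; no HTTP
	// request was issued.
	return err
}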
@@ -300,6 +319,12 @@ func (client ActionRulesClient) ListByResourceGroup(ctx context.Context, resourc
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.ActionRulesClient", "ListByResourceGroup", err.Error())
+ }
+
result.fn = client.listByResourceGroupNextResults
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, targetResourceGroup, targetResourceType, targetResource, severity, monitorService, impactedScope, description, alertRuleID, actionGroup, name)
if err != nil {
@@ -453,6 +478,12 @@ func (client ActionRulesClient) ListBySubscription(ctx context.Context, targetRe
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.ActionRulesClient", "ListBySubscription", err.Error())
+ }
+
result.fn = client.listBySubscriptionNextResults
req, err := client.ListBySubscriptionPreparer(ctx, targetResourceGroup, targetResourceType, targetResource, severity, monitorService, impactedScope, description, alertRuleID, actionGroup, name)
if err != nil {
@@ -597,6 +628,12 @@ func (client ActionRulesClient) Update(ctx context.Context, resourceGroupName st
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.ActionRulesClient", "Update", err.Error())
+ }
+
req, err := client.UpdatePreparer(ctx, resourceGroupName, actionRuleName, actionRulePatch)
if err != nil {
err = autorest.NewErrorWithError(err, "alertsmanagement.ActionRulesClient", "Update", nil, "Failure preparing request")
diff --git a/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/alerts.go b/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/alerts.go
index a7e449dc3f64..3d2eaeb1847e 100644
--- a/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/alerts.go
+++ b/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/alerts.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -55,6 +56,12 @@ func (client AlertsClient) ChangeState(ctx context.Context, alertID string, newS
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.AlertsClient", "ChangeState", err.Error())
+ }
+
req, err := client.ChangeStatePreparer(ctx, alertID, newState)
if err != nil {
err = autorest.NewErrorWithError(err, "alertsmanagement.AlertsClient", "ChangeState", nil, "Failure preparing request")
@@ -157,6 +164,12 @@ func (client AlertsClient) GetAll(ctx context.Context, targetResource string, ta
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.AlertsClient", "GetAll", err.Error())
+ }
+
result.fn = client.getAllNextResults
req, err := client.GetAllPreparer(ctx, targetResource, targetResourceType, targetResourceGroup, monitorService, monitorCondition, severity, alertState, alertRule, smartGroupID, includeContext, includeEgressConfig, pageCount, sortBy, sortOrder, selectParameter, timeRange, customTimeRange)
if err != nil {
@@ -320,6 +333,12 @@ func (client AlertsClient) GetByID(ctx context.Context, alertID string) (result
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.AlertsClient", "GetByID", err.Error())
+ }
+
req, err := client.GetByIDPreparer(ctx, alertID)
if err != nil {
err = autorest.NewErrorWithError(err, "alertsmanagement.AlertsClient", "GetByID", nil, "Failure preparing request")
@@ -396,6 +415,12 @@ func (client AlertsClient) GetHistory(ctx context.Context, alertID string) (resu
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.AlertsClient", "GetHistory", err.Error())
+ }
+
req, err := client.GetHistoryPreparer(ctx, alertID)
if err != nil {
err = autorest.NewErrorWithError(err, "alertsmanagement.AlertsClient", "GetHistory", nil, "Failure preparing request")
@@ -487,6 +512,12 @@ func (client AlertsClient) GetSummary(ctx context.Context, groupby AlertsSummary
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.AlertsClient", "GetSummary", err.Error())
+ }
+
req, err := client.GetSummaryPreparer(ctx, groupby, includeSmartGroupsCount, targetResource, targetResourceType, targetResourceGroup, monitorService, monitorCondition, severity, alertState, alertRule, timeRange, customTimeRange)
if err != nil {
err = autorest.NewErrorWithError(err, "alertsmanagement.AlertsClient", "GetSummary", nil, "Failure preparing request")
diff --git a/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/smartgroups.go b/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/smartgroups.go
index b19282ee3a08..7dfe878d1f40 100644
--- a/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/smartgroups.go
+++ b/services/preview/alertsmanagement/mgmt/2019-05-05/alertsmanagement/smartgroups.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -55,6 +56,12 @@ func (client SmartGroupsClient) ChangeState(ctx context.Context, smartGroupID st
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.SmartGroupsClient", "ChangeState", err.Error())
+ }
+
req, err := client.ChangeStatePreparer(ctx, smartGroupID, newState)
if err != nil {
err = autorest.NewErrorWithError(err, "alertsmanagement.SmartGroupsClient", "ChangeState", nil, "Failure preparing request")
@@ -144,6 +151,12 @@ func (client SmartGroupsClient) GetAll(ctx context.Context, targetResource strin
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.SmartGroupsClient", "GetAll", err.Error())
+ }
+
result.fn = client.getAllNextResults
req, err := client.GetAllPreparer(ctx, targetResource, targetResourceGroup, targetResourceType, monitorService, monitorCondition, severity, smartGroupState, timeRange, pageCount, sortBy, sortOrder)
if err != nil {
@@ -289,6 +302,12 @@ func (client SmartGroupsClient) GetByID(ctx context.Context, smartGroupID string
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.SmartGroupsClient", "GetByID", err.Error())
+ }
+
req, err := client.GetByIDPreparer(ctx, smartGroupID)
if err != nil {
err = autorest.NewErrorWithError(err, "alertsmanagement.SmartGroupsClient", "GetByID", nil, "Failure preparing request")
@@ -364,6 +383,12 @@ func (client SmartGroupsClient) GetHistory(ctx context.Context, smartGroupID str
tracing.EndSpan(ctx, sc, err)
}()
}
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("alertsmanagement.SmartGroupsClient", "GetHistory", err.Error())
+ }
+
req, err := client.GetHistoryPreparer(ctx, smartGroupID)
if err != nil {
err = autorest.NewErrorWithError(err, "alertsmanagement.SmartGroupsClient", "GetHistory", nil, "Failure preparing request")
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/accounts.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/accounts.go
index 5bfda1c18a4e..987a43b45d32 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/accounts.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/accounts.go
@@ -118,20 +118,21 @@ func (client AccountsClient) GetResponder(resp *http.Response) (result Account,
return
}
-// List lists all billing accounts for a user which he has access to.
+// List lists all billing accounts for which a user has access.
// Parameters:
// expand - may be used to expand the invoiceSections and billingProfiles.
-func (client AccountsClient) List(ctx context.Context, expand string) (result AccountListResult, err error) {
+func (client AccountsClient) List(ctx context.Context, expand string) (result AccountListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.alr.Response.Response != nil {
+ sc = result.alr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.AccountsClient", "List", nil, "Failure preparing request")
@@ -140,12 +141,12 @@ func (client AccountsClient) List(ctx context.Context, expand string) (result Ac
resp, err := client.ListSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.alr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.AccountsClient", "List", resp, "Failure sending request")
return
}
- result, err = client.ListResponder(resp)
+ result.alr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.AccountsClient", "List", resp, "Failure responding to request")
}
@@ -191,6 +192,43 @@ func (client AccountsClient) ListResponder(resp *http.Response) (result AccountL
return
}
+// listNextResults retrieves the next set of results, if any.
+func (client AccountsClient) listNextResults(ctx context.Context, lastResults AccountListResult) (result AccountListResult, err error) {
+ req, err := lastResults.accountListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.AccountsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.AccountsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.AccountsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AccountsClient) ListComplete(ctx context.Context, expand string) (result AccountListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, expand)
+ return
+}
+
// Update the operation to update a billing account.
// Parameters:
// billingAccountName - billing Account Id.
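`AccountsClient.List` now returns `AccountListResultPage` rather than the bare list type, and the new `ListComplete` wraps it in an iterator, so existing callers must switch to page- or iterator-style enumeration. A sketch of the iterator path, assuming the conventional generated surface (`NotDone`, `Value`, `NextWithContext`) defined in this package's models and an already-authorized client; anything not shown in the hunks above is illustrative:

```go
package billingexamples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
)

// enumerateAccounts walks every billing account, letting the iterator
// returned by ListComplete cross page boundaries as needed.
func enumerateAccounts(ctx context.Context, client billing.AccountsClient) error {
	iter, err := client.ListComplete(ctx, "" /* expand */)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		acct := iter.Value()
		if acct.Name != nil {
			fmt.Println(*acct.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```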
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/agreements.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/agreements.go
index c13aba8d0666..dfd35f44fef7 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/agreements.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/agreements.go
@@ -124,17 +124,18 @@ func (client AgreementsClient) GetResponder(resp *http.Response) (result Agreeme
// Parameters:
// billingAccountName - billing Account Id.
// expand - may be used to expand the participants.
-func (client AgreementsClient) ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string) (result AgreementListResult, err error) {
+func (client AgreementsClient) ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string) (result AgreementListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/AgreementsClient.ListByBillingAccountName")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.alr.Response.Response != nil {
+ sc = result.alr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByBillingAccountNameNextResults
req, err := client.ListByBillingAccountNamePreparer(ctx, billingAccountName, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.AgreementsClient", "ListByBillingAccountName", nil, "Failure preparing request")
@@ -143,12 +144,12 @@ func (client AgreementsClient) ListByBillingAccountName(ctx context.Context, bil
resp, err := client.ListByBillingAccountNameSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.alr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.AgreementsClient", "ListByBillingAccountName", resp, "Failure sending request")
return
}
- result, err = client.ListByBillingAccountNameResponder(resp)
+ result.alr, err = client.ListByBillingAccountNameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.AgreementsClient", "ListByBillingAccountName", resp, "Failure responding to request")
}
@@ -197,3 +198,40 @@ func (client AgreementsClient) ListByBillingAccountNameResponder(resp *http.Resp
result.Response = autorest.Response{Response: resp}
return
}
+
+// listByBillingAccountNameNextResults retrieves the next set of results, if any.
+func (client AgreementsClient) listByBillingAccountNameNextResults(ctx context.Context, lastResults AgreementListResult) (result AgreementListResult, err error) {
+ req, err := lastResults.agreementListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.AgreementsClient", "listByBillingAccountNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBillingAccountNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.AgreementsClient", "listByBillingAccountNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBillingAccountNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.AgreementsClient", "listByBillingAccountNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBillingAccountNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AgreementsClient) ListByBillingAccountNameComplete(ctx context.Context, billingAccountName string, expand string) (result AgreementListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgreementsClient.ListByBillingAccountName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBillingAccountName(ctx, billingAccountName, expand)
+ return
+}
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/billingapi/interfaces.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/billingapi/interfaces.go
index d4dbd586a09b..e99ffd0d4b4b 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/billingapi/interfaces.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/billingapi/interfaces.go
@@ -26,7 +26,7 @@ import (
// AccountsClientAPI contains the set of methods on the AccountsClient type.
type AccountsClientAPI interface {
Get(ctx context.Context, billingAccountName string, expand string) (result billing.Account, err error)
- List(ctx context.Context, expand string) (result billing.AccountListResult, err error)
+ List(ctx context.Context, expand string) (result billing.AccountListResultPage, err error)
Update(ctx context.Context, billingAccountName string, parameters billing.AccountUpdateProperties) (result billing.AccountsUpdateFuture, err error)
}
@@ -58,20 +58,28 @@ var _ AvailableBalancesClientAPI = (*billing.AvailableBalancesClient)(nil)
type ProfilesClientAPI interface {
Create(ctx context.Context, billingAccountName string, parameters billing.ProfileCreationParameters) (result billing.ProfilesCreateFuture, err error)
Get(ctx context.Context, billingAccountName string, billingProfileName string, expand string) (result billing.Profile, err error)
- ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string) (result billing.ProfileListResult, err error)
+ ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string) (result billing.ProfileListResultPage, err error)
Update(ctx context.Context, billingAccountName string, billingProfileName string, parameters billing.Profile) (result billing.ProfilesUpdateFuture, err error)
}
var _ ProfilesClientAPI = (*billing.ProfilesClient)(nil)
+// CustomersClientAPI contains the set of methods on the CustomersClient type.
+type CustomersClientAPI interface {
+ Get(ctx context.Context, billingAccountName string, customerName string, expand string) (result billing.Customer, err error)
+ ListByBillingAccountName(ctx context.Context, billingAccountName string, filter string, skiptoken string) (result billing.CustomerListResultPage, err error)
+}
+
+var _ CustomersClientAPI = (*billing.CustomersClient)(nil)
+
// InvoiceSectionsClientAPI contains the set of methods on the InvoiceSectionsClient type.
type InvoiceSectionsClientAPI interface {
Create(ctx context.Context, billingAccountName string, parameters billing.InvoiceSectionCreationRequest) (result billing.InvoiceSectionsCreateFuture, err error)
ElevateToBillingProfile(ctx context.Context, billingAccountName string, invoiceSectionName string) (result autorest.Response, err error)
Get(ctx context.Context, billingAccountName string, invoiceSectionName string, expand string) (result billing.InvoiceSection, err error)
ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string) (result billing.InvoiceSectionListResult, err error)
- ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string) (result billing.InvoiceSectionListResult, err error)
- ListByCreateSubscriptionPermission(ctx context.Context, billingAccountName string, expand string) (result billing.InvoiceSectionListResult, err error)
+ ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string) (result billing.InvoiceSectionListResultPage, err error)
+ ListByCreateSubscriptionPermission(ctx context.Context, billingAccountName string, expand string) (result billing.InvoiceSectionListResultPage, err error)
Update(ctx context.Context, billingAccountName string, invoiceSectionName string, parameters billing.InvoiceSection) (result billing.InvoiceSectionsUpdateFuture, err error)
}
@@ -80,7 +88,7 @@ var _ InvoiceSectionsClientAPI = (*billing.InvoiceSectionsClient)(nil)
// DepartmentsClientAPI contains the set of methods on the DepartmentsClient type.
type DepartmentsClientAPI interface {
Get(ctx context.Context, billingAccountName string, departmentName string, expand string, filter string) (result billing.Department, err error)
- ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string, filter string) (result billing.DepartmentListResult, err error)
+ ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string, filter string) (result billing.DepartmentListResultPage, err error)
}
var _ DepartmentsClientAPI = (*billing.DepartmentsClient)(nil)
@@ -88,7 +96,7 @@ var _ DepartmentsClientAPI = (*billing.DepartmentsClient)(nil)
// EnrollmentAccountsClientAPI contains the set of methods on the EnrollmentAccountsClient type.
type EnrollmentAccountsClientAPI interface {
GetByEnrollmentAccountID(ctx context.Context, billingAccountName string, enrollmentAccountName string, expand string, filter string) (result billing.EnrollmentAccount, err error)
- ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string, filter string) (result billing.EnrollmentAccountListResult, err error)
+ ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string, filter string) (result billing.EnrollmentAccountListResultPage, err error)
}
var _ EnrollmentAccountsClientAPI = (*billing.EnrollmentAccountsClient)(nil)
@@ -96,8 +104,8 @@ var _ EnrollmentAccountsClientAPI = (*billing.EnrollmentAccountsClient)(nil)
// InvoicesClientAPI contains the set of methods on the InvoicesClient type.
type InvoicesClientAPI interface {
Get(ctx context.Context, billingAccountName string, billingProfileName string, invoiceName string) (result billing.InvoiceSummary, err error)
- ListByBillingAccountName(ctx context.Context, billingAccountName string, periodStartDate string, periodEndDate string) (result billing.InvoiceListResult, err error)
- ListByBillingProfile(ctx context.Context, billingAccountName string, billingProfileName string, periodStartDate string, periodEndDate string) (result billing.InvoiceListResult, err error)
+ ListByBillingAccountName(ctx context.Context, billingAccountName string, periodStartDate string, periodEndDate string) (result billing.InvoiceListResultPage, err error)
+ ListByBillingProfile(ctx context.Context, billingAccountName string, billingProfileName string, periodStartDate string, periodEndDate string) (result billing.InvoiceListResultPage, err error)
}
var _ InvoicesClientAPI = (*billing.InvoicesClient)(nil)
@@ -112,9 +120,11 @@ var _ PriceSheetClientAPI = (*billing.PriceSheetClient)(nil)
// SubscriptionsClientAPI contains the set of methods on the SubscriptionsClient type.
type SubscriptionsClientAPI interface {
Get(ctx context.Context, billingAccountName string, invoiceSectionName string, billingSubscriptionName string) (result billing.SubscriptionSummary, err error)
+ GetByCustomerName(ctx context.Context, billingAccountName string, customerName string, billingSubscriptionName string) (result billing.SubscriptionSummary, err error)
ListByBillingAccountName(ctx context.Context, billingAccountName string) (result billing.SubscriptionsListResultPage, err error)
- ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string) (result billing.SubscriptionsListResult, err error)
- ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string) (result billing.SubscriptionsListResult, err error)
+ ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string) (result billing.SubscriptionsListResultPage, err error)
+ ListByCustomerName(ctx context.Context, billingAccountName string, customerName string) (result billing.SubscriptionsListResultPage, err error)
+ ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string) (result billing.SubscriptionsListResultPage, err error)
Transfer(ctx context.Context, billingAccountName string, invoiceSectionName string, billingSubscriptionName string, parameters billing.TransferBillingSubscriptionRequestProperties) (result billing.SubscriptionsTransferFuture, err error)
ValidateTransfer(ctx context.Context, billingAccountName string, invoiceSectionName string, billingSubscriptionName string, parameters billing.TransferBillingSubscriptionRequestProperties) (result billing.ValidateSubscriptionTransferEligibilityResult, err error)
}
@@ -125,7 +135,7 @@ var _ SubscriptionsClientAPI = (*billing.SubscriptionsClient)(nil)
type ProductsClientAPI interface {
Get(ctx context.Context, billingAccountName string, invoiceSectionName string, productName string) (result billing.ProductSummary, err error)
ListByBillingAccountName(ctx context.Context, billingAccountName string, filter string) (result billing.ProductsListResultPage, err error)
- ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string, filter string) (result billing.ProductsListResult, err error)
+ ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string, filter string) (result billing.ProductsListResultPage, err error)
Transfer(ctx context.Context, billingAccountName string, invoiceSectionName string, productName string, parameters billing.TransferProductRequestProperties) (result billing.ProductSummary, err error)
UpdateAutoRenewByBillingAccountName(ctx context.Context, billingAccountName string, productName string, body billing.UpdateAutoRenewRequest) (result billing.UpdateAutoRenewOperationSummary, err error)
UpdateAutoRenewByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string, productName string, body billing.UpdateAutoRenewRequest) (result billing.UpdateAutoRenewOperationSummary, err error)
@@ -137,8 +147,9 @@ var _ ProductsClientAPI = (*billing.ProductsClient)(nil)
// TransactionsClientAPI contains the set of methods on the TransactionsClient type.
type TransactionsClientAPI interface {
ListByBillingAccountName(ctx context.Context, billingAccountName string, startDate string, endDate string, filter string) (result billing.TransactionsListResultPage, err error)
- ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string, startDate string, endDate string, filter string) (result billing.TransactionsListResult, err error)
- ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string, startDate string, endDate string, filter string) (result billing.TransactionsListResult, err error)
+ ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string, startDate string, endDate string, filter string) (result billing.TransactionsListResultPage, err error)
+ ListByCustomerName(ctx context.Context, billingAccountName string, customerName string, startDate string, endDate string, filter string) (result billing.TransactionsListResultPage, err error)
+ ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string, startDate string, endDate string, filter string) (result billing.TransactionsListResultPage, err error)
}
var _ TransactionsClientAPI = (*billing.TransactionsClient)(nil)
@@ -189,6 +200,7 @@ var _ OperationsClientAPI = (*billing.OperationsClient)(nil)
type PermissionsClientAPI interface {
ListByBillingAccount(ctx context.Context, billingAccountName string) (result billing.PermissionsListResult, err error)
ListByBillingProfile(ctx context.Context, billingAccountName string, billingProfileName string) (result billing.PermissionsListResult, err error)
+ ListByCustomers(ctx context.Context, billingAccountName string, customerName string) (result billing.PermissionsListResult, err error)
ListByInvoiceSections(ctx context.Context, billingAccountName string, invoiceSectionName string) (result billing.PermissionsListResult, err error)
}
@@ -227,7 +239,7 @@ var _ RoleAssignmentsClientAPI = (*billing.RoleAssignmentsClient)(nil)
// AgreementsClientAPI contains the set of methods on the AgreementsClient type.
type AgreementsClientAPI interface {
Get(ctx context.Context, billingAccountName string, agreementName string, expand string) (result billing.Agreement, err error)
- ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string) (result billing.AgreementListResult, err error)
+ ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string) (result billing.AgreementListResultPage, err error)
}
var _ AgreementsClientAPI = (*billing.AgreementsClient)(nil)
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/customers.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/customers.go
new file mode 100644
index 000000000000..1747fd15439e
--- /dev/null
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/customers.go
@@ -0,0 +1,244 @@
+package billing
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// CustomersClient is the billing client provides access to billing resources for Azure subscriptions.
+type CustomersClient struct {
+ BaseClient
+}
+
+// NewCustomersClient creates an instance of the CustomersClient client.
+func NewCustomersClient(subscriptionID string) CustomersClient {
+ return NewCustomersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewCustomersClientWithBaseURI creates an instance of the CustomersClient client.
+func NewCustomersClientWithBaseURI(baseURI string, subscriptionID string) CustomersClient {
+ return CustomersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Get get the customer by id.
+// Parameters:
+// billingAccountName - billing Account Id.
+// customerName - customer Id.
+// expand - may be used to expand enabledAzureSkus, resellers.
+func (client CustomersClient) Get(ctx context.Context, billingAccountName string, customerName string, expand string) (result Customer, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CustomersClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, billingAccountName, customerName, expand)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.CustomersClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "billing.CustomersClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.CustomersClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client CustomersClient) GetPreparer(ctx context.Context, billingAccountName string, customerName string, expand string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "billingAccountName": autorest.Encode("path", billingAccountName),
+ "customerName": autorest.Encode("path", customerName),
+ }
+
+ const APIVersion = "2018-11-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(expand) > 0 {
+ queryParameters["$expand"] = autorest.Encode("query", expand)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/customers/{customerName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client CustomersClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client CustomersClient) GetResponder(resp *http.Response) (result Customer, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByBillingAccountName lists all customers which the current user can work with on-behalf of a partner.
+// Parameters:
+// billingAccountName - billing Account Id.
+// filter - may be used to filter using hasPermission('{permissionId}') to only return customers for which the
+// caller has the specified permission.
+// skiptoken - skiptoken is only used if a previous operation returned a partial result. If a previous response
+// contains a nextLink element, the value of the nextLink element will include a skiptoken parameter that
+// specifies a starting point to use for subsequent calls.
+func (client CustomersClient) ListByBillingAccountName(ctx context.Context, billingAccountName string, filter string, skiptoken string) (result CustomerListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CustomersClient.ListByBillingAccountName")
+ defer func() {
+ sc := -1
+ if result.clr.Response.Response != nil {
+ sc = result.clr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByBillingAccountNameNextResults
+ req, err := client.ListByBillingAccountNamePreparer(ctx, billingAccountName, filter, skiptoken)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.CustomersClient", "ListByBillingAccountName", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByBillingAccountNameSender(req)
+ if err != nil {
+ result.clr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "billing.CustomersClient", "ListByBillingAccountName", resp, "Failure sending request")
+ return
+ }
+
+ result.clr, err = client.ListByBillingAccountNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.CustomersClient", "ListByBillingAccountName", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByBillingAccountNamePreparer prepares the ListByBillingAccountName request.
+func (client CustomersClient) ListByBillingAccountNamePreparer(ctx context.Context, billingAccountName string, filter string, skiptoken string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "billingAccountName": autorest.Encode("path", billingAccountName),
+ }
+
+ const APIVersion = "2018-11-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(skiptoken) > 0 {
+ queryParameters["$skiptoken"] = autorest.Encode("query", skiptoken)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/customers", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByBillingAccountNameSender sends the ListByBillingAccountName request. The method will close the
+// http.Response Body if it receives an error.
+func (client CustomersClient) ListByBillingAccountNameSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByBillingAccountNameResponder handles the response to the ListByBillingAccountName request. The method always
+// closes the http.Response Body.
+func (client CustomersClient) ListByBillingAccountNameResponder(resp *http.Response) (result CustomerListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByBillingAccountNameNextResults retrieves the next set of results, if any.
+func (client CustomersClient) listByBillingAccountNameNextResults(ctx context.Context, lastResults CustomerListResult) (result CustomerListResult, err error) {
+ req, err := lastResults.customerListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.CustomersClient", "listByBillingAccountNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBillingAccountNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.CustomersClient", "listByBillingAccountNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBillingAccountNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.CustomersClient", "listByBillingAccountNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBillingAccountNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client CustomersClient) ListByBillingAccountNameComplete(ctx context.Context, billingAccountName string, filter string, skiptoken string) (result CustomerListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CustomersClient.ListByBillingAccountName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBillingAccountName(ctx, billingAccountName, filter, skiptoken)
+ return
+}
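The new `CustomersClient` follows the same pagination shape: pass an empty `skiptoken` on the first call and let the returned page's `nextLink` drive continuation. A page-level sketch, assuming `NotDone`/`Values`/`NextWithContext` on `CustomerListResultPage` and a `Name` field on `Customer` (both conventional for this SDK, neither shown in the hunks above); the filter GUID is a placeholder:

```go
package billingexamples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
)

// listCustomers prints the customers the caller holds a given permission on,
// iterating page by page via the new CustomersClient.
func listCustomers(ctx context.Context, client billing.CustomersClient, accountName string) error {
	filter := "hasPermission('00000000-0000-0000-0000-000000000000')" // placeholder permission id
	page, err := client.ListByBillingAccountName(ctx, accountName, filter, "" /* skiptoken */)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, c := range page.Values() {
			if c.Name != nil {
				fmt.Println(*c.Name)
			}
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```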
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/departments.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/departments.go
index 30dcd0aa2aef..3512a0433b5e 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/departments.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/departments.go
@@ -125,23 +125,24 @@ func (client DepartmentsClient) GetResponder(resp *http.Response) (result Depart
return
}
-// ListByBillingAccountName lists all departments for a user which he has access to.
+// ListByBillingAccountName lists all departments for which a user has access.
// Parameters:
// billingAccountName - billing Account Id.
// expand - may be used to expand the enrollmentAccounts.
// filter - the filter supports 'eq', 'lt', 'gt', 'le', 'ge', and 'and'. It does not currently support 'ne',
// 'or', or 'not'. Tag filter is a key value pair string where key and value is separated by a colon (:).
-func (client DepartmentsClient) ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string, filter string) (result DepartmentListResult, err error) {
+func (client DepartmentsClient) ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string, filter string) (result DepartmentListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/DepartmentsClient.ListByBillingAccountName")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.dlr.Response.Response != nil {
+ sc = result.dlr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByBillingAccountNameNextResults
req, err := client.ListByBillingAccountNamePreparer(ctx, billingAccountName, expand, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.DepartmentsClient", "ListByBillingAccountName", nil, "Failure preparing request")
@@ -150,12 +151,12 @@ func (client DepartmentsClient) ListByBillingAccountName(ctx context.Context, bi
resp, err := client.ListByBillingAccountNameSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.dlr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.DepartmentsClient", "ListByBillingAccountName", resp, "Failure sending request")
return
}
- result, err = client.ListByBillingAccountNameResponder(resp)
+ result.dlr, err = client.ListByBillingAccountNameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.DepartmentsClient", "ListByBillingAccountName", resp, "Failure responding to request")
}
@@ -207,3 +208,40 @@ func (client DepartmentsClient) ListByBillingAccountNameResponder(resp *http.Res
result.Response = autorest.Response{Response: resp}
return
}
+
+// listByBillingAccountNameNextResults retrieves the next set of results, if any.
+func (client DepartmentsClient) listByBillingAccountNameNextResults(ctx context.Context, lastResults DepartmentListResult) (result DepartmentListResult, err error) {
+ req, err := lastResults.departmentListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.DepartmentsClient", "listByBillingAccountNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBillingAccountNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.DepartmentsClient", "listByBillingAccountNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBillingAccountNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.DepartmentsClient", "listByBillingAccountNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBillingAccountNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client DepartmentsClient) ListByBillingAccountNameComplete(ctx context.Context, billingAccountName string, expand string, filter string) (result DepartmentListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DepartmentsClient.ListByBillingAccountName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBillingAccountName(ctx, billingAccountName, expand, filter)
+ return
+}
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/enrollmentaccounts.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/enrollmentaccounts.go
index 93a24a2fa7b7..23f2e222e9ba 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/enrollmentaccounts.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/enrollmentaccounts.go
@@ -125,23 +125,24 @@ func (client EnrollmentAccountsClient) GetByEnrollmentAccountIDResponder(resp *h
return
}
-// ListByBillingAccountName lists all Enrollment Accounts for a user which he has access to.
+// ListByBillingAccountName lists all Enrollment Accounts for which a user has access.
// Parameters:
// billingAccountName - billing Account Id.
// expand - may be used to expand the department.
// filter - the filter supports 'eq', 'lt', 'gt', 'le', 'ge', and 'and'. It does not currently support 'ne',
// 'or', or 'not'. Tag filter is a key value pair string where key and value is separated by a colon (:).
-func (client EnrollmentAccountsClient) ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string, filter string) (result EnrollmentAccountListResult, err error) {
+func (client EnrollmentAccountsClient) ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string, filter string) (result EnrollmentAccountListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/EnrollmentAccountsClient.ListByBillingAccountName")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.ealr.Response.Response != nil {
+ sc = result.ealr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByBillingAccountNameNextResults
req, err := client.ListByBillingAccountNamePreparer(ctx, billingAccountName, expand, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.EnrollmentAccountsClient", "ListByBillingAccountName", nil, "Failure preparing request")
@@ -150,12 +151,12 @@ func (client EnrollmentAccountsClient) ListByBillingAccountName(ctx context.Cont
resp, err := client.ListByBillingAccountNameSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.ealr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.EnrollmentAccountsClient", "ListByBillingAccountName", resp, "Failure sending request")
return
}
- result, err = client.ListByBillingAccountNameResponder(resp)
+ result.ealr, err = client.ListByBillingAccountNameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.EnrollmentAccountsClient", "ListByBillingAccountName", resp, "Failure responding to request")
}
@@ -207,3 +208,40 @@ func (client EnrollmentAccountsClient) ListByBillingAccountNameResponder(resp *h
result.Response = autorest.Response{Response: resp}
return
}
+
+// listByBillingAccountNameNextResults retrieves the next set of results, if any.
+func (client EnrollmentAccountsClient) listByBillingAccountNameNextResults(ctx context.Context, lastResults EnrollmentAccountListResult) (result EnrollmentAccountListResult, err error) {
+ req, err := lastResults.enrollmentAccountListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.EnrollmentAccountsClient", "listByBillingAccountNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBillingAccountNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.EnrollmentAccountsClient", "listByBillingAccountNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBillingAccountNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.EnrollmentAccountsClient", "listByBillingAccountNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBillingAccountNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client EnrollmentAccountsClient) ListByBillingAccountNameComplete(ctx context.Context, billingAccountName string, expand string, filter string) (result EnrollmentAccountListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EnrollmentAccountsClient.ListByBillingAccountName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBillingAccountName(ctx, billingAccountName, expand, filter)
+ return
+}
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/invoices.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/invoices.go
index ccd578af7e47..a777f9f5d36e 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/invoices.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/invoices.go
@@ -123,17 +123,18 @@ func (client InvoicesClient) GetResponder(resp *http.Response) (result InvoiceSu
// billingAccountName - billing Account Id.
// periodStartDate - invoice period start date.
// periodEndDate - invoice period end date.
-func (client InvoicesClient) ListByBillingAccountName(ctx context.Context, billingAccountName string, periodStartDate string, periodEndDate string) (result InvoiceListResult, err error) {
+func (client InvoicesClient) ListByBillingAccountName(ctx context.Context, billingAccountName string, periodStartDate string, periodEndDate string) (result InvoiceListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/InvoicesClient.ListByBillingAccountName")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.ilr.Response.Response != nil {
+ sc = result.ilr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByBillingAccountNameNextResults
req, err := client.ListByBillingAccountNamePreparer(ctx, billingAccountName, periodStartDate, periodEndDate)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.InvoicesClient", "ListByBillingAccountName", nil, "Failure preparing request")
@@ -142,12 +143,12 @@ func (client InvoicesClient) ListByBillingAccountName(ctx context.Context, billi
resp, err := client.ListByBillingAccountNameSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.ilr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.InvoicesClient", "ListByBillingAccountName", resp, "Failure sending request")
return
}
- result, err = client.ListByBillingAccountNameResponder(resp)
+ result.ilr, err = client.ListByBillingAccountNameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.InvoicesClient", "ListByBillingAccountName", resp, "Failure responding to request")
}
@@ -196,23 +197,61 @@ func (client InvoicesClient) ListByBillingAccountNameResponder(resp *http.Respon
return
}
+// listByBillingAccountNameNextResults retrieves the next set of results, if any.
+func (client InvoicesClient) listByBillingAccountNameNextResults(ctx context.Context, lastResults InvoiceListResult) (result InvoiceListResult, err error) {
+ req, err := lastResults.invoiceListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.InvoicesClient", "listByBillingAccountNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBillingAccountNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.InvoicesClient", "listByBillingAccountNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBillingAccountNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.InvoicesClient", "listByBillingAccountNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBillingAccountNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client InvoicesClient) ListByBillingAccountNameComplete(ctx context.Context, billingAccountName string, periodStartDate string, periodEndDate string) (result InvoiceListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/InvoicesClient.ListByBillingAccountName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBillingAccountName(ctx, billingAccountName, periodStartDate, periodEndDate)
+ return
+}
+
// ListByBillingProfile list of invoices for a billing profile.
// Parameters:
// billingAccountName - billing Account Id.
// billingProfileName - billing Profile Id.
// periodStartDate - invoice period start date.
// periodEndDate - invoice period end date.
-func (client InvoicesClient) ListByBillingProfile(ctx context.Context, billingAccountName string, billingProfileName string, periodStartDate string, periodEndDate string) (result InvoiceListResult, err error) {
+func (client InvoicesClient) ListByBillingProfile(ctx context.Context, billingAccountName string, billingProfileName string, periodStartDate string, periodEndDate string) (result InvoiceListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/InvoicesClient.ListByBillingProfile")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.ilr.Response.Response != nil {
+ sc = result.ilr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByBillingProfileNextResults
req, err := client.ListByBillingProfilePreparer(ctx, billingAccountName, billingProfileName, periodStartDate, periodEndDate)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.InvoicesClient", "ListByBillingProfile", nil, "Failure preparing request")
@@ -221,12 +260,12 @@ func (client InvoicesClient) ListByBillingProfile(ctx context.Context, billingAc
resp, err := client.ListByBillingProfileSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.ilr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.InvoicesClient", "ListByBillingProfile", resp, "Failure sending request")
return
}
- result, err = client.ListByBillingProfileResponder(resp)
+ result.ilr, err = client.ListByBillingProfileResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.InvoicesClient", "ListByBillingProfile", resp, "Failure responding to request")
}
@@ -275,3 +314,40 @@ func (client InvoicesClient) ListByBillingProfileResponder(resp *http.Response)
result.Response = autorest.Response{Response: resp}
return
}
+
+// listByBillingProfileNextResults retrieves the next set of results, if any.
+func (client InvoicesClient) listByBillingProfileNextResults(ctx context.Context, lastResults InvoiceListResult) (result InvoiceListResult, err error) {
+ req, err := lastResults.invoiceListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.InvoicesClient", "listByBillingProfileNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBillingProfileSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.InvoicesClient", "listByBillingProfileNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBillingProfileResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.InvoicesClient", "listByBillingProfileNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBillingProfileComplete enumerates all values, automatically crossing page boundaries as required.
+func (client InvoicesClient) ListByBillingProfileComplete(ctx context.Context, billingAccountName string, billingProfileName string, periodStartDate string, periodEndDate string) (result InvoiceListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/InvoicesClient.ListByBillingProfile")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBillingProfile(ctx, billingAccountName, billingProfileName, periodStartDate, periodEndDate)
+ return
+}
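// Illustrative sketch (not part of this change set): consuming the new
// ListByBillingProfileComplete iterator added above. It assumes an already constructed and
// authorized billing.InvoicesClient; the account, profile and period arguments are
// placeholders, and the Name field on InvoiceSummary is assumed from the package's usual
// resource shape.
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
)

func printInvoices(ctx context.Context, client billing.InvoicesClient) error {
	// The *Complete variant returns an iterator that follows nextLink pages transparently.
	iter, err := client.ListByBillingProfileComplete(ctx, "{billingAccountName}", "{billingProfileName}", "2019-01-01", "2019-12-31")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		invoice := iter.Value() // InvoiceSummary
		if invoice.Name != nil {
			fmt.Println(*invoice.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}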
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/invoicesections.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/invoicesections.go
index 61e494cbaad6..e90f3652cf4e 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/invoicesections.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/invoicesections.go
@@ -266,7 +266,7 @@ func (client InvoiceSectionsClient) GetResponder(resp *http.Response) (result In
return
}
-// ListByBillingAccountName lists all invoice sections for a user which he has access to.
+// ListByBillingAccountName lists all invoice sections for which a user has access.
// Parameters:
// billingAccountName - billing Account Id.
// expand - may be used to expand the billingProfiles.
@@ -344,21 +344,22 @@ func (client InvoiceSectionsClient) ListByBillingAccountNameResponder(resp *http
return
}
-// ListByBillingProfileName lists all invoice sections under a billing profile for a user which he has access to.
+// ListByBillingProfileName lists all invoice sections under a billing profile for which a user has access.
// Parameters:
// billingAccountName - billing Account Id.
// billingProfileName - billing Profile Id.
-func (client InvoiceSectionsClient) ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string) (result InvoiceSectionListResult, err error) {
+func (client InvoiceSectionsClient) ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string) (result InvoiceSectionListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/InvoiceSectionsClient.ListByBillingProfileName")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.islr.Response.Response != nil {
+ sc = result.islr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByBillingProfileNameNextResults
req, err := client.ListByBillingProfileNamePreparer(ctx, billingAccountName, billingProfileName)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "ListByBillingProfileName", nil, "Failure preparing request")
@@ -367,12 +368,12 @@ func (client InvoiceSectionsClient) ListByBillingProfileName(ctx context.Context
resp, err := client.ListByBillingProfileNameSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.islr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "ListByBillingProfileName", resp, "Failure sending request")
return
}
- result, err = client.ListByBillingProfileNameResponder(resp)
+ result.islr, err = client.ListByBillingProfileNameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "ListByBillingProfileName", resp, "Failure responding to request")
}
@@ -420,21 +421,59 @@ func (client InvoiceSectionsClient) ListByBillingProfileNameResponder(resp *http
return
}
+// listByBillingProfileNameNextResults retrieves the next set of results, if any.
+func (client InvoiceSectionsClient) listByBillingProfileNameNextResults(ctx context.Context, lastResults InvoiceSectionListResult) (result InvoiceSectionListResult, err error) {
+ req, err := lastResults.invoiceSectionListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "listByBillingProfileNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBillingProfileNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "listByBillingProfileNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBillingProfileNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "listByBillingProfileNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBillingProfileNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client InvoiceSectionsClient) ListByBillingProfileNameComplete(ctx context.Context, billingAccountName string, billingProfileName string) (result InvoiceSectionListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/InvoiceSectionsClient.ListByBillingProfileName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBillingProfileName(ctx, billingAccountName, billingProfileName)
+ return
+}
+
// ListByCreateSubscriptionPermission lists all invoiceSections with create subscription permission for a user.
// Parameters:
// billingAccountName - billing Account Id.
// expand - may be used to expand the billingProfiles.
-func (client InvoiceSectionsClient) ListByCreateSubscriptionPermission(ctx context.Context, billingAccountName string, expand string) (result InvoiceSectionListResult, err error) {
+func (client InvoiceSectionsClient) ListByCreateSubscriptionPermission(ctx context.Context, billingAccountName string, expand string) (result InvoiceSectionListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/InvoiceSectionsClient.ListByCreateSubscriptionPermission")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.islr.Response.Response != nil {
+ sc = result.islr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByCreateSubscriptionPermissionNextResults
req, err := client.ListByCreateSubscriptionPermissionPreparer(ctx, billingAccountName, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "ListByCreateSubscriptionPermission", nil, "Failure preparing request")
@@ -443,12 +482,12 @@ func (client InvoiceSectionsClient) ListByCreateSubscriptionPermission(ctx conte
resp, err := client.ListByCreateSubscriptionPermissionSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.islr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "ListByCreateSubscriptionPermission", resp, "Failure sending request")
return
}
- result, err = client.ListByCreateSubscriptionPermissionResponder(resp)
+ result.islr, err = client.ListByCreateSubscriptionPermissionResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "ListByCreateSubscriptionPermission", resp, "Failure responding to request")
}
@@ -498,6 +537,43 @@ func (client InvoiceSectionsClient) ListByCreateSubscriptionPermissionResponder(
return
}
+// listByCreateSubscriptionPermissionNextResults retrieves the next set of results, if any.
+func (client InvoiceSectionsClient) listByCreateSubscriptionPermissionNextResults(ctx context.Context, lastResults InvoiceSectionListResult) (result InvoiceSectionListResult, err error) {
+ req, err := lastResults.invoiceSectionListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "listByCreateSubscriptionPermissionNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByCreateSubscriptionPermissionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "listByCreateSubscriptionPermissionNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByCreateSubscriptionPermissionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.InvoiceSectionsClient", "listByCreateSubscriptionPermissionNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByCreateSubscriptionPermissionComplete enumerates all values, automatically crossing page boundaries as required.
+func (client InvoiceSectionsClient) ListByCreateSubscriptionPermissionComplete(ctx context.Context, billingAccountName string, expand string) (result InvoiceSectionListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/InvoiceSectionsClient.ListByCreateSubscriptionPermission")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByCreateSubscriptionPermission(ctx, billingAccountName, expand)
+ return
+}
+
// Update the operation to update an InvoiceSection.
// Parameters:
// billingAccountName - billing Account Id.
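// Illustrative sketch (not part of this change set): with the breaking change above,
// ListByBillingProfileName returns an InvoiceSectionListResultPage instead of a bare
// InvoiceSectionListResult, so existing callers walk the pages as shown here or switch to
// ListByBillingProfileNameComplete. An already authorized client is assumed; the account and
// profile names are placeholders, and the Name field on InvoiceSection is assumed from the
// package's usual resource shape.
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
)

func printInvoiceSections(ctx context.Context, client billing.InvoiceSectionsClient) error {
	page, err := client.ListByBillingProfileName(ctx, "{billingAccountName}", "{billingProfileName}")
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, section := range page.Values() {
			if section.Name != nil {
				fmt.Println(*section.Name)
			}
		}
		// When the nextLink is exhausted the page becomes empty and NotDone reports false.
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}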
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/models.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/models.go
index 0c33e5243237..b74e76db7a73 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/models.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/models.go
@@ -500,6 +500,143 @@ type AccountListResult struct {
NextLink *string `json:"nextLink,omitempty"`
}
+// AccountListResultIterator provides access to a complete listing of Account values.
+type AccountListResultIterator struct {
+ i int
+ page AccountListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *AccountListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *AccountListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter AccountListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter AccountListResultIterator) Response() AccountListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter AccountListResultIterator) Value() Account {
+ if !iter.page.NotDone() {
+ return Account{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the AccountListResultIterator type.
+func NewAccountListResultIterator(page AccountListResultPage) AccountListResultIterator {
+ return AccountListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (alr AccountListResult) IsEmpty() bool {
+ return alr.Value == nil || len(*alr.Value) == 0
+}
+
+// accountListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (alr AccountListResult) accountListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if alr.NextLink == nil || len(to.String(alr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(alr.NextLink)))
+}
+
+// AccountListResultPage contains a page of Account values.
+type AccountListResultPage struct {
+ fn func(context.Context, AccountListResult) (AccountListResult, error)
+ alr AccountListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *AccountListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AccountListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.alr)
+ if err != nil {
+ return err
+ }
+ page.alr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AccountListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page AccountListResultPage) NotDone() bool {
+ return !page.alr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page AccountListResultPage) Response() AccountListResult {
+ return page.alr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page AccountListResultPage) Values() []Account {
+ if page.alr.IsEmpty() {
+ return nil
+ }
+ return *page.alr.Value
+}
+
+// Creates a new instance of the AccountListResultPage type.
+func NewAccountListResultPage(getNextPage func(context.Context, AccountListResult) (AccountListResult, error)) AccountListResultPage {
+ return AccountListResultPage{fn: getNextPage}
+}
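// Illustrative sketch (not part of this change set): the exported NewAccountListResultPage
// constructor above can serve in-memory data through the paging interface, e.g. when stubbing
// a client in tests. The single-shot getNextPage closure is a hypothetical helper, not an SDK
// function.
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
)

func fakeAccountPage(accounts []billing.Account) billing.AccountListResultPage {
	served := false
	getNextPage := func(ctx context.Context, last billing.AccountListResult) (billing.AccountListResult, error) {
		if served {
			// An empty result makes IsEmpty true, so NotDone reports false and enumeration ends.
			return billing.AccountListResult{}, nil
		}
		served = true
		return billing.AccountListResult{Value: &accounts}, nil
	}
	return billing.NewAccountListResultPage(getNextPage)
}

func main() {
	page := fakeAccountPage(make([]billing.Account, 3))
	ctx := context.Background()
	// A freshly constructed page holds no data; the first NextWithContext call invokes
	// getNextPage to load it.
	if err := page.NextWithContext(ctx); err != nil {
		panic(err)
	}
	for page.NotDone() {
		fmt.Println(len(page.Values())) // 3
		if err := page.NextWithContext(ctx); err != nil {
			panic(err)
		}
	}
}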
+
// AccountProperties the properties of the billing account.
type AccountProperties struct {
// DisplayName - READ-ONLY; The billing account name.
@@ -700,6 +837,143 @@ type AgreementListResult struct {
NextLink *string `json:"nextLink,omitempty"`
}
+// AgreementListResultIterator provides access to a complete listing of Agreement values.
+type AgreementListResultIterator struct {
+ i int
+ page AgreementListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *AgreementListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgreementListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *AgreementListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter AgreementListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter AgreementListResultIterator) Response() AgreementListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter AgreementListResultIterator) Value() Agreement {
+ if !iter.page.NotDone() {
+ return Agreement{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the AgreementListResultIterator type.
+func NewAgreementListResultIterator(page AgreementListResultPage) AgreementListResultIterator {
+ return AgreementListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (alr AgreementListResult) IsEmpty() bool {
+ return alr.Value == nil || len(*alr.Value) == 0
+}
+
+// agreementListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (alr AgreementListResult) agreementListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if alr.NextLink == nil || len(to.String(alr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(alr.NextLink)))
+}
+
+// AgreementListResultPage contains a page of Agreement values.
+type AgreementListResultPage struct {
+ fn func(context.Context, AgreementListResult) (AgreementListResult, error)
+ alr AgreementListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *AgreementListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AgreementListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.alr)
+ if err != nil {
+ return err
+ }
+ page.alr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AgreementListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page AgreementListResultPage) NotDone() bool {
+ return !page.alr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page AgreementListResultPage) Response() AgreementListResult {
+ return page.alr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page AgreementListResultPage) Values() []Agreement {
+ if page.alr.IsEmpty() {
+ return nil
+ }
+ return *page.alr.Value
+}
+
+// Creates a new instance of the AgreementListResultPage type.
+func NewAgreementListResultPage(getNextPage func(context.Context, AgreementListResult) (AgreementListResult, error)) AgreementListResultPage {
+ return AgreementListResultPage{fn: getNextPage}
+}
+
// AgreementProperties the properties of the agreement.
type AgreementProperties struct {
// AgreementLink - READ-ONLY; The link to the agreement.
@@ -800,11 +1074,11 @@ type AvailableBalanceProperties struct {
Amount *Amount `json:"amount,omitempty"`
}
-// Department a department resource.
-type Department struct {
+// Customer a partner's customer.
+type Customer struct {
autorest.Response `json:"-"`
- // DepartmentProperties - A department.
- *DepartmentProperties `json:"properties,omitempty"`
+ // CustomerProperties - The customer.
+ *CustomerProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Resource Id.
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name.
@@ -813,17 +1087,17 @@ type Department struct {
Type *string `json:"type,omitempty"`
}
-// MarshalJSON is the custom marshaler for Department.
-func (d Department) MarshalJSON() ([]byte, error) {
+// MarshalJSON is the custom marshaler for Customer.
+func (c Customer) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
- if d.DepartmentProperties != nil {
- objectMap["properties"] = d.DepartmentProperties
+ if c.CustomerProperties != nil {
+ objectMap["properties"] = c.CustomerProperties
}
return json.Marshal(objectMap)
}
-// UnmarshalJSON is the custom unmarshaler for Department struct.
-func (d *Department) UnmarshalJSON(body []byte) error {
+// UnmarshalJSON is the custom unmarshaler for Customer struct.
+func (c *Customer) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
@@ -833,12 +1107,12 @@ func (d *Department) UnmarshalJSON(body []byte) error {
switch k {
case "properties":
if v != nil {
- var departmentProperties DepartmentProperties
- err = json.Unmarshal(*v, &departmentProperties)
+ var customerProperties CustomerProperties
+ err = json.Unmarshal(*v, &customerProperties)
if err != nil {
return err
}
- d.DepartmentProperties = &departmentProperties
+ c.CustomerProperties = &customerProperties
}
case "id":
if v != nil {
@@ -847,7 +1121,7 @@ func (d *Department) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- d.ID = &ID
+ c.ID = &ID
}
case "name":
if v != nil {
@@ -856,7 +1130,7 @@ func (d *Department) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- d.Name = &name
+ c.Name = &name
}
case "type":
if v != nil {
@@ -865,7 +1139,7 @@ func (d *Department) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- d.Type = &typeVar
+ c.Type = &typeVar
}
}
}
@@ -873,54 +1147,420 @@ func (d *Department) UnmarshalJSON(body []byte) error {
return nil
}
-// DepartmentListResult result of listing departments.
-type DepartmentListResult struct {
+// CustomerListResult result of listing customers.
+type CustomerListResult struct {
autorest.Response `json:"-"`
- // Value - READ-ONLY; The list of departments.
- Value *[]Department `json:"value,omitempty"`
+ // Value - READ-ONLY; The list of customers.
+ Value *[]Customer `json:"value,omitempty"`
// NextLink - READ-ONLY; The link (url) to the next page of results.
NextLink *string `json:"nextLink,omitempty"`
}
-// DepartmentProperties the properties of the department.
-type DepartmentProperties struct {
- // DepartmentName - The name for department.
- DepartmentName *string `json:"departmentName,omitempty"`
- // CostCenter - The cost center name.
- CostCenter *string `json:"costCenter,omitempty"`
- // Status - The status for department.
- Status *string `json:"status,omitempty"`
- // EnrollmentAccounts - Associated enrollment accounts. By default this is not populated, unless it's specified in $expand.
- EnrollmentAccounts *[]EnrollmentAccount `json:"enrollmentAccounts,omitempty"`
-}
-
-// DetailedTransferStatus detailed transfer status.
-type DetailedTransferStatus struct {
- // ProductType - READ-ONLY; Type of product being transferred. Possible values include: 'ProductTypeAzureSubscription', 'ProductTypeAzureReservation'
- ProductType ProductType `json:"productType,omitempty"`
- // ProductID - READ-ONLY; Id of product being transferred.
- ProductID *string `json:"productId,omitempty"`
- // TransferStatus - READ-ONLY; Transfer status. Possible values include: 'NotStarted', 'InProgress', 'Completed', 'Failed'
- TransferStatus ProductTransferStatus `json:"transferStatus,omitempty"`
- // ErrorDetails - Error details for transfer execution.
- ErrorDetails *Error `json:"errorDetails,omitempty"`
-}
-
-// DownloadProperties the properties of the invoice download.
-type DownloadProperties struct {
- // Kind - READ-ONLY; Document type. Possible values include: 'Invoice', 'VoidNote', 'Receipt', 'CreditNote'
- Kind Kind `json:"kind,omitempty"`
- // URL - READ-ONLY; Document URL.
- URL *string `json:"url,omitempty"`
+// CustomerListResultIterator provides access to a complete listing of Customer values.
+type CustomerListResultIterator struct {
+ i int
+ page CustomerListResultPage
}
-// DownloadURL a secure URL that can be used to download a an entity until the URL expires.
-type DownloadURL struct {
- autorest.Response `json:"-"`
- // ExpiryTime - READ-ONLY; The time in UTC at which this download URL will expire.
- ExpiryTime *date.Time `json:"expiryTime,omitempty"`
- // URL - READ-ONLY; The URL to the PDF file.
- URL *string `json:"url,omitempty"`
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *CustomerListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CustomerListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *CustomerListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter CustomerListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter CustomerListResultIterator) Response() CustomerListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter CustomerListResultIterator) Value() Customer {
+ if !iter.page.NotDone() {
+ return Customer{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the CustomerListResultIterator type.
+func NewCustomerListResultIterator(page CustomerListResultPage) CustomerListResultIterator {
+ return CustomerListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (clr CustomerListResult) IsEmpty() bool {
+ return clr.Value == nil || len(*clr.Value) == 0
+}
+
+// customerListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (clr CustomerListResult) customerListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if clr.NextLink == nil || len(to.String(clr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(clr.NextLink)))
+}
+
+// CustomerListResultPage contains a page of Customer values.
+type CustomerListResultPage struct {
+ fn func(context.Context, CustomerListResult) (CustomerListResult, error)
+ clr CustomerListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *CustomerListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CustomerListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.clr)
+ if err != nil {
+ return err
+ }
+ page.clr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *CustomerListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page CustomerListResultPage) NotDone() bool {
+ return !page.clr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page CustomerListResultPage) Response() CustomerListResult {
+ return page.clr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page CustomerListResultPage) Values() []Customer {
+ if page.clr.IsEmpty() {
+ return nil
+ }
+ return *page.clr.Value
+}
+
+// Creates a new instance of the CustomerListResultPage type.
+func NewCustomerListResultPage(getNextPage func(context.Context, CustomerListResult) (CustomerListResult, error)) CustomerListResultPage {
+ return CustomerListResultPage{fn: getNextPage}
+}
+
+// CustomerProperties the properties of a customer.
+type CustomerProperties struct {
+ // DisplayName - The name of the customer.
+ DisplayName *string `json:"displayName,omitempty"`
+ // EnabledAzureSKUs - The Azure SKUs enabled for the customer.
+ EnabledAzureSKUs *[]EnabledAzureSKUs `json:"enabledAzureSKUs,omitempty"`
+ // Resellers - The resellers which are allowed to provide service to this customer.
+ Resellers *[]Reseller `json:"resellers,omitempty"`
+}
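// Illustrative sketch (not part of this change set): the custom Customer marshaler above writes
// only the flattened "properties" object; the read-only ID/Name/Type fields are omitted on the
// wire. The display name is a placeholder value.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	c := billing.Customer{
		CustomerProperties: &billing.CustomerProperties{
			DisplayName: to.StringPtr("Contoso"),
		},
	}
	b, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"properties":{"displayName":"Contoso"}}
}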
+
+// Department a department resource.
+type Department struct {
+ autorest.Response `json:"-"`
+ // DepartmentProperties - A department.
+ *DepartmentProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource Id.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Department.
+func (d Department) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if d.DepartmentProperties != nil {
+ objectMap["properties"] = d.DepartmentProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Department struct.
+func (d *Department) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var departmentProperties DepartmentProperties
+ err = json.Unmarshal(*v, &departmentProperties)
+ if err != nil {
+ return err
+ }
+ d.DepartmentProperties = &departmentProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ d.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ d.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ d.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// DepartmentListResult result of listing departments.
+type DepartmentListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; The list of departments.
+ Value *[]Department `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The link (url) to the next page of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// DepartmentListResultIterator provides access to a complete listing of Department values.
+type DepartmentListResultIterator struct {
+ i int
+ page DepartmentListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *DepartmentListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DepartmentListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *DepartmentListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter DepartmentListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter DepartmentListResultIterator) Response() DepartmentListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter DepartmentListResultIterator) Value() Department {
+ if !iter.page.NotDone() {
+ return Department{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the DepartmentListResultIterator type.
+func NewDepartmentListResultIterator(page DepartmentListResultPage) DepartmentListResultIterator {
+ return DepartmentListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (dlr DepartmentListResult) IsEmpty() bool {
+ return dlr.Value == nil || len(*dlr.Value) == 0
+}
+
+// departmentListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (dlr DepartmentListResult) departmentListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if dlr.NextLink == nil || len(to.String(dlr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(dlr.NextLink)))
+}
+
+// DepartmentListResultPage contains a page of Department values.
+type DepartmentListResultPage struct {
+ fn func(context.Context, DepartmentListResult) (DepartmentListResult, error)
+ dlr DepartmentListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *DepartmentListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DepartmentListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.dlr)
+ if err != nil {
+ return err
+ }
+ page.dlr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *DepartmentListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page DepartmentListResultPage) NotDone() bool {
+ return !page.dlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page DepartmentListResultPage) Response() DepartmentListResult {
+ return page.dlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page DepartmentListResultPage) Values() []Department {
+ if page.dlr.IsEmpty() {
+ return nil
+ }
+ return *page.dlr.Value
+}
+
+// Creates a new instance of the DepartmentListResultPage type.
+func NewDepartmentListResultPage(getNextPage func(context.Context, DepartmentListResult) (DepartmentListResult, error)) DepartmentListResultPage {
+ return DepartmentListResultPage{fn: getNextPage}
+}
+
+// DepartmentProperties the properties of the department.
+type DepartmentProperties struct {
+ // DepartmentName - The name for department.
+ DepartmentName *string `json:"departmentName,omitempty"`
+ // CostCenter - The cost center name.
+ CostCenter *string `json:"costCenter,omitempty"`
+ // Status - The status for department.
+ Status *string `json:"status,omitempty"`
+ // EnrollmentAccounts - Associated enrollment accounts. By default this is not populated, unless it's specified in $expand.
+ EnrollmentAccounts *[]EnrollmentAccount `json:"enrollmentAccounts,omitempty"`
+}
+
+// DetailedTransferStatus detailed transfer status.
+type DetailedTransferStatus struct {
+ // ProductType - READ-ONLY; Type of product being transferred. Possible values include: 'ProductTypeAzureSubscription', 'ProductTypeAzureReservation'
+ ProductType ProductType `json:"productType,omitempty"`
+ // ProductID - READ-ONLY; Id of product being transferred.
+ ProductID *string `json:"productId,omitempty"`
+ // TransferStatus - READ-ONLY; Transfer status. Possible values include: 'NotStarted', 'InProgress', 'Completed', 'Failed'
+ TransferStatus ProductTransferStatus `json:"transferStatus,omitempty"`
+ // ErrorDetails - Error details for transfer execution.
+ ErrorDetails *Error `json:"errorDetails,omitempty"`
+}
+
+// DownloadProperties the properties of the invoice download.
+type DownloadProperties struct {
+ // Kind - READ-ONLY; Document type. Possible values include: 'Invoice', 'VoidNote', 'Receipt', 'CreditNote'
+ Kind Kind `json:"kind,omitempty"`
+ // URL - READ-ONLY; Document URL.
+ URL *string `json:"url,omitempty"`
+}
+
+// DownloadURL a secure URL that can be used to download an entity until the URL expires.
+type DownloadURL struct {
+ autorest.Response `json:"-"`
+ // ExpiryTime - READ-ONLY; The time in UTC at which this download URL will expire.
+ ExpiryTime *date.Time `json:"expiryTime,omitempty"`
+ // URL - READ-ONLY; The URL to the PDF file.
+ URL *string `json:"url,omitempty"`
}
// EnabledAzureSKUs details about the enabled azure sku.
@@ -1047,6 +1687,143 @@ type EnrollmentAccountListResult struct {
NextLink *string `json:"nextLink,omitempty"`
}
+// EnrollmentAccountListResultIterator provides access to a complete listing of EnrollmentAccount values.
+type EnrollmentAccountListResultIterator struct {
+ i int
+ page EnrollmentAccountListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *EnrollmentAccountListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EnrollmentAccountListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *EnrollmentAccountListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter EnrollmentAccountListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter EnrollmentAccountListResultIterator) Response() EnrollmentAccountListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter EnrollmentAccountListResultIterator) Value() EnrollmentAccount {
+ if !iter.page.NotDone() {
+ return EnrollmentAccount{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the EnrollmentAccountListResultIterator type.
+func NewEnrollmentAccountListResultIterator(page EnrollmentAccountListResultPage) EnrollmentAccountListResultIterator {
+ return EnrollmentAccountListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (ealr EnrollmentAccountListResult) IsEmpty() bool {
+ return ealr.Value == nil || len(*ealr.Value) == 0
+}
+
+// enrollmentAccountListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (ealr EnrollmentAccountListResult) enrollmentAccountListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if ealr.NextLink == nil || len(to.String(ealr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(ealr.NextLink)))
+}
+
+// EnrollmentAccountListResultPage contains a page of EnrollmentAccount values.
+type EnrollmentAccountListResultPage struct {
+ fn func(context.Context, EnrollmentAccountListResult) (EnrollmentAccountListResult, error)
+ ealr EnrollmentAccountListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *EnrollmentAccountListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EnrollmentAccountListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.ealr)
+ if err != nil {
+ return err
+ }
+ page.ealr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *EnrollmentAccountListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page EnrollmentAccountListResultPage) NotDone() bool {
+ return !page.ealr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page EnrollmentAccountListResultPage) Response() EnrollmentAccountListResult {
+ return page.ealr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page EnrollmentAccountListResultPage) Values() []EnrollmentAccount {
+ if page.ealr.IsEmpty() {
+ return nil
+ }
+ return *page.ealr.Value
+}
+
+// Creates a new instance of the EnrollmentAccountListResultPage type.
+func NewEnrollmentAccountListResultPage(getNextPage func(context.Context, EnrollmentAccountListResult) (EnrollmentAccountListResult, error)) EnrollmentAccountListResultPage {
+ return EnrollmentAccountListResultPage{fn: getNextPage}
+}
+
// EnrollmentAccountProperties the properties of the account.
type EnrollmentAccountProperties struct {
// AccountName - The account name.
@@ -1110,52 +1887,189 @@ type InitiateTransferProperties struct {
RecipientEmailID *string `json:"recipientEmailId,omitempty"`
}
-// InitiateTransferRequest request parameters to initiate transfer.
-type InitiateTransferRequest struct {
- // InitiateTransferProperties - Request parameters to initiate transfer.
- *InitiateTransferProperties `json:"properties,omitempty"`
+// InitiateTransferRequest request parameters to initiate transfer.
+type InitiateTransferRequest struct {
+ // InitiateTransferProperties - Request parameters to initiate transfer.
+ *InitiateTransferProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for InitiateTransferRequest.
+func (itr InitiateTransferRequest) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if itr.InitiateTransferProperties != nil {
+ objectMap["properties"] = itr.InitiateTransferProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for InitiateTransferRequest struct.
+func (itr *InitiateTransferRequest) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var initiateTransferProperties InitiateTransferProperties
+ err = json.Unmarshal(*v, &initiateTransferProperties)
+ if err != nil {
+ return err
+ }
+ itr.InitiateTransferProperties = &initiateTransferProperties
+ }
+ }
+ }
+
+ return nil
+}
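// Illustrative sketch (not part of this change set): the InitiateTransferRequest unmarshaler
// added above lifts the nested "properties" object into the embedded
// InitiateTransferProperties pointer. The e-mail address is a placeholder value.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
)

func main() {
	body := []byte(`{"properties":{"recipientEmailId":"user@example.com"}}`)
	var req billing.InitiateTransferRequest
	if err := json.Unmarshal(body, &req); err != nil {
		panic(err)
	}
	if req.InitiateTransferProperties != nil && req.InitiateTransferProperties.RecipientEmailID != nil {
		fmt.Println(*req.InitiateTransferProperties.RecipientEmailID) // user@example.com
	}
}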
+
+// InvoiceListResult result of listing invoices.
+type InvoiceListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; The list of invoices.
+ Value *[]InvoiceSummary `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The link (url) to the next page of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// InvoiceListResultIterator provides access to a complete listing of InvoiceSummary values.
+type InvoiceListResultIterator struct {
+ i int
+ page InvoiceListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *InvoiceListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/InvoiceListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *InvoiceListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter InvoiceListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter InvoiceListResultIterator) Response() InvoiceListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter InvoiceListResultIterator) Value() InvoiceSummary {
+ if !iter.page.NotDone() {
+ return InvoiceSummary{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the InvoiceListResultIterator type.
+func NewInvoiceListResultIterator(page InvoiceListResultPage) InvoiceListResultIterator {
+ return InvoiceListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (ilr InvoiceListResult) IsEmpty() bool {
+ return ilr.Value == nil || len(*ilr.Value) == 0
+}
+
+// invoiceListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (ilr InvoiceListResult) invoiceListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if ilr.NextLink == nil || len(to.String(ilr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(ilr.NextLink)))
+}
+
+// InvoiceListResultPage contains a page of InvoiceSummary values.
+type InvoiceListResultPage struct {
+ fn func(context.Context, InvoiceListResult) (InvoiceListResult, error)
+ ilr InvoiceListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *InvoiceListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/InvoiceListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.ilr)
+ if err != nil {
+ return err
+ }
+ page.ilr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *InvoiceListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page InvoiceListResultPage) NotDone() bool {
+ return !page.ilr.IsEmpty()
}
-// MarshalJSON is the custom marshaler for InitiateTransferRequest.
-func (itr InitiateTransferRequest) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if itr.InitiateTransferProperties != nil {
- objectMap["properties"] = itr.InitiateTransferProperties
- }
- return json.Marshal(objectMap)
+// Response returns the raw server response from the last page request.
+func (page InvoiceListResultPage) Response() InvoiceListResult {
+ return page.ilr
}
-// UnmarshalJSON is the custom unmarshaler for InitiateTransferRequest struct.
-func (itr *InitiateTransferRequest) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "properties":
- if v != nil {
- var initiateTransferProperties InitiateTransferProperties
- err = json.Unmarshal(*v, &initiateTransferProperties)
- if err != nil {
- return err
- }
- itr.InitiateTransferProperties = &initiateTransferProperties
- }
- }
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page InvoiceListResultPage) Values() []InvoiceSummary {
+ if page.ilr.IsEmpty() {
+ return nil
}
-
- return nil
+ return *page.ilr.Value
}
-// InvoiceListResult result of listing invoices.
-type InvoiceListResult struct {
- autorest.Response `json:"-"`
- // Value - READ-ONLY; The list of invoices.
- Value *[]InvoiceSummary `json:"value,omitempty"`
- // NextLink - READ-ONLY; The link (url) to the next page of results.
- NextLink *string `json:"nextLink,omitempty"`
+// Creates a new instance of the InvoiceListResultPage type.
+func NewInvoiceListResultPage(getNextPage func(context.Context, InvoiceListResult) (InvoiceListResult, error)) InvoiceListResultPage {
+ return InvoiceListResultPage{fn: getNextPage}
}
// InvoiceSection an InvoiceSection resource.
@@ -1248,6 +2162,143 @@ type InvoiceSectionListResult struct {
NextLink *string `json:"nextLink,omitempty"`
}
+// InvoiceSectionListResultIterator provides access to a complete listing of InvoiceSection values.
+type InvoiceSectionListResultIterator struct {
+ i int
+ page InvoiceSectionListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *InvoiceSectionListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/InvoiceSectionListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *InvoiceSectionListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter InvoiceSectionListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter InvoiceSectionListResultIterator) Response() InvoiceSectionListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter InvoiceSectionListResultIterator) Value() InvoiceSection {
+ if !iter.page.NotDone() {
+ return InvoiceSection{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the InvoiceSectionListResultIterator type.
+func NewInvoiceSectionListResultIterator(page InvoiceSectionListResultPage) InvoiceSectionListResultIterator {
+ return InvoiceSectionListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (islr InvoiceSectionListResult) IsEmpty() bool {
+ return islr.Value == nil || len(*islr.Value) == 0
+}
+
+// invoiceSectionListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (islr InvoiceSectionListResult) invoiceSectionListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if islr.NextLink == nil || len(to.String(islr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(islr.NextLink)))
+}
+
+// InvoiceSectionListResultPage contains a page of InvoiceSection values.
+type InvoiceSectionListResultPage struct {
+ fn func(context.Context, InvoiceSectionListResult) (InvoiceSectionListResult, error)
+ islr InvoiceSectionListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *InvoiceSectionListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/InvoiceSectionListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.islr)
+ if err != nil {
+ return err
+ }
+ page.islr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *InvoiceSectionListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page InvoiceSectionListResultPage) NotDone() bool {
+ return !page.islr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page InvoiceSectionListResultPage) Response() InvoiceSectionListResult {
+ return page.islr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page InvoiceSectionListResultPage) Values() []InvoiceSection {
+ if page.islr.IsEmpty() {
+ return nil
+ }
+ return *page.islr.Value
+}
+
+// Creates a new instance of the InvoiceSectionListResultPage type.
+func NewInvoiceSectionListResultPage(getNextPage func(context.Context, InvoiceSectionListResult) (InvoiceSectionListResult, error)) InvoiceSectionListResultPage {
+ return InvoiceSectionListResultPage{fn: getNextPage}
+}
+
// InvoiceSectionProperties the properties of an InvoiceSection.
type InvoiceSectionProperties struct {
// DisplayName - The name of the InvoiceSection.
@@ -2443,6 +3494,143 @@ type ProfileListResult struct {
NextLink *string `json:"nextLink,omitempty"`
}
+// ProfileListResultIterator provides access to a complete listing of Profile values.
+type ProfileListResultIterator struct {
+ i int
+ page ProfileListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ProfileListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ProfileListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ProfileListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ProfileListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ProfileListResultIterator) Response() ProfileListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ProfileListResultIterator) Value() Profile {
+ if !iter.page.NotDone() {
+ return Profile{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ProfileListResultIterator type.
+func NewProfileListResultIterator(page ProfileListResultPage) ProfileListResultIterator {
+ return ProfileListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (plr ProfileListResult) IsEmpty() bool {
+ return plr.Value == nil || len(*plr.Value) == 0
+}
+
+// profileListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (plr ProfileListResult) profileListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if plr.NextLink == nil || len(to.String(plr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(plr.NextLink)))
+}
+
+// ProfileListResultPage contains a page of Profile values.
+type ProfileListResultPage struct {
+ fn func(context.Context, ProfileListResult) (ProfileListResult, error)
+ plr ProfileListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ProfileListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ProfileListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.plr)
+ if err != nil {
+ return err
+ }
+ page.plr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ProfileListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ProfileListResultPage) NotDone() bool {
+ return !page.plr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ProfileListResultPage) Response() ProfileListResult {
+ return page.plr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ProfileListResultPage) Values() []Profile {
+ if page.plr.IsEmpty() {
+ return nil
+ }
+ return *page.plr.Value
+}
+
+// Creates a new instance of the ProfileListResultPage type.
+func NewProfileListResultPage(getNextPage func(context.Context, ProfileListResult) (ProfileListResult, error)) ProfileListResultPage {
+ return ProfileListResultPage{fn: getNextPage}
+}
+
// ProfileProperties the properties of the billing profile.
type ProfileProperties struct {
// DisplayName - The billing profile name.
@@ -2833,6 +4021,14 @@ type RecipientTransferProperties struct {
DetailedTransferStatus *[]DetailedTransferStatus `json:"detailedTransferStatus,omitempty"`
}
+// Reseller details about a reseller.
+type Reseller struct {
+ // ResellerID - READ-ONLY; The reseller id.
+ ResellerID *string `json:"resellerId,omitempty"`
+ // Description - READ-ONLY; A description of the reseller.
+ Description *string `json:"description,omitempty"`
+}
+
// Resource the Resource model definition.
type Resource struct {
// ID - READ-ONLY; Resource Id.
@@ -3106,6 +4302,10 @@ type SubscriptionProperties struct {
BillingProfileID *string `json:"billingProfileId,omitempty"`
// BillingProfileName - READ-ONLY; Billing Profile name to which this product belongs.
BillingProfileName *string `json:"billingProfileName,omitempty"`
+ // CustomerID - READ-ONLY; Customer id to which this product belongs.
+ CustomerID *string `json:"customerId,omitempty"`
+ // CustomerDisplayName - READ-ONLY; Display name of customer to which this product belongs.
+ CustomerDisplayName *string `json:"customerDisplayName,omitempty"`
// InvoiceSectionID - READ-ONLY; Invoice section id to which this product belongs.
InvoiceSectionID *string `json:"invoiceSectionId,omitempty"`
// InvoiceSectionName - READ-ONLY; Invoice section name to which this product belongs.
@@ -3114,6 +4314,8 @@ type SubscriptionProperties struct {
SkuID *string `json:"skuId,omitempty"`
// SkuDescription - READ-ONLY; The sku description.
SkuDescription *string `json:"skuDescription,omitempty"`
+ // Reseller - READ-ONLY; Reseller for this subscription.
+ Reseller *Reseller `json:"reseller,omitempty"`
}
// SubscriptionsListResult result of listing billing subscriptions summary.
@@ -3606,6 +4808,10 @@ type TransactionsSummaryProperties struct {
TransactionAmount *Amount `json:"transactionAmount,omitempty"`
// Quantity - READ-ONLY; Purchase quantity.
Quantity *int32 `json:"quantity,omitempty"`
+ // CustomerID - READ-ONLY; Customer id to which this product belongs.
+ CustomerID *string `json:"customerId,omitempty"`
+ // CustomerDisplayName - READ-ONLY; Display name of customer to which this product belongs.
+ CustomerDisplayName *string `json:"customerDisplayName,omitempty"`
// InvoiceSectionID - READ-ONLY; Invoice section id to which this product belongs.
InvoiceSectionID *string `json:"invoiceSectionId,omitempty"`
// InvoiceSectionName - READ-ONLY; Invoice section name to which this product belongs.
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/permissions.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/permissions.go
index 38e0f4a8bff2..f1fdc51735e3 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/permissions.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/permissions.go
@@ -190,6 +190,82 @@ func (client PermissionsClient) ListByBillingProfileResponder(resp *http.Respons
return
}
+// ListByCustomers lists all billing permissions for the caller under a customer.
+// Parameters:
+// billingAccountName - billing Account Id.
+// customerName - customer Id.
+func (client PermissionsClient) ListByCustomers(ctx context.Context, billingAccountName string, customerName string) (result PermissionsListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PermissionsClient.ListByCustomers")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListByCustomersPreparer(ctx, billingAccountName, customerName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.PermissionsClient", "ListByCustomers", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByCustomersSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "billing.PermissionsClient", "ListByCustomers", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByCustomersResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.PermissionsClient", "ListByCustomers", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByCustomersPreparer prepares the ListByCustomers request.
+func (client PermissionsClient) ListByCustomersPreparer(ctx context.Context, billingAccountName string, customerName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "billingAccountName": autorest.Encode("path", billingAccountName),
+ "customerName": autorest.Encode("path", customerName),
+ }
+
+ const APIVersion = "2018-11-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/customers/{customerName}/providers/Microsoft.Billing/billingPermissions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByCustomersSender sends the ListByCustomers request. The method will close the
+// http.Response Body if it receives an error.
+func (client PermissionsClient) ListByCustomersSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByCustomersResponder handles the response to the ListByCustomers request. The method always
+// closes the http.Response Body.
+func (client PermissionsClient) ListByCustomersResponder(resp *http.Response) (result PermissionsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
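For reference, calling the new ListByCustomers operation end to end might look roughly like the sketch below. The NewPermissionsClient constructor and the environment-based authorizer are assumptions based on the package's usual conventions and are not established by this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Assumed constructor/auth wiring; adjust to however the client is built in your code.
	client := billing.NewPermissionsClient("<subscription-id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// List all billing permissions the caller has under a customer.
	result, err := client.ListByCustomers(context.Background(), "<billingAccountName>", "<customerName>")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", result)
}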
// ListByInvoiceSections lists all billing permissions for the caller under invoice section.
// Parameters:
// billingAccountName - billing Account Id.
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/products.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/products.go
index b69105e4c645..69e700e8a92b 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/products.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/products.go
@@ -243,17 +243,18 @@ func (client ProductsClient) ListByBillingAccountNameComplete(ctx context.Contex
// filter - may be used to filter by product type. The filter supports 'eq', 'lt', 'gt', 'le', 'ge', and 'and'.
// It does not currently support 'ne', 'or', or 'not'. Tag filter is a key value pair string where key and
// value is separated by a colon (:).
-func (client ProductsClient) ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string, filter string) (result ProductsListResult, err error) {
+func (client ProductsClient) ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string, filter string) (result ProductsListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ProductsClient.ListByInvoiceSectionName")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.plr.Response.Response != nil {
+ sc = result.plr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByInvoiceSectionNameNextResults
req, err := client.ListByInvoiceSectionNamePreparer(ctx, billingAccountName, invoiceSectionName, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.ProductsClient", "ListByInvoiceSectionName", nil, "Failure preparing request")
@@ -262,12 +263,12 @@ func (client ProductsClient) ListByInvoiceSectionName(ctx context.Context, billi
resp, err := client.ListByInvoiceSectionNameSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.plr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.ProductsClient", "ListByInvoiceSectionName", resp, "Failure sending request")
return
}
- result, err = client.ListByInvoiceSectionNameResponder(resp)
+ result.plr, err = client.ListByInvoiceSectionNameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.ProductsClient", "ListByInvoiceSectionName", resp, "Failure responding to request")
}
@@ -318,6 +319,43 @@ func (client ProductsClient) ListByInvoiceSectionNameResponder(resp *http.Respon
return
}
+// listByInvoiceSectionNameNextResults retrieves the next set of results, if any.
+func (client ProductsClient) listByInvoiceSectionNameNextResults(ctx context.Context, lastResults ProductsListResult) (result ProductsListResult, err error) {
+ req, err := lastResults.productsListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.ProductsClient", "listByInvoiceSectionNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByInvoiceSectionNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.ProductsClient", "listByInvoiceSectionNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByInvoiceSectionNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.ProductsClient", "listByInvoiceSectionNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByInvoiceSectionNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ProductsClient) ListByInvoiceSectionNameComplete(ctx context.Context, billingAccountName string, invoiceSectionName string, filter string) (result ProductsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ProductsClient.ListByInvoiceSectionName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByInvoiceSectionName(ctx, billingAccountName, invoiceSectionName, filter)
+ return
+}
+
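Because ListByInvoiceSectionName now returns a ProductsListResultPage instead of the raw list, callers that want manual page control can loop over pages themselves. A sketch under the assumption of an already-configured ProductsClient; the empty filter string simply omits $filter.

package examples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
)

// listProductsPageByPage shows manual paging: each call to NextWithContext
// fetches the next page via the nextLink until no pages remain.
func listProductsPageByPage(ctx context.Context, client billing.ProductsClient) error {
	page, err := client.ListByInvoiceSectionName(ctx, "<billingAccountName>", "<invoiceSectionName>", "")
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, product := range page.Values() {
			fmt.Printf("%+v\n", product)
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}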
// Transfer the operation to transfer a Product to another invoice section.
// Parameters:
// billingAccountName - billing Account Id.
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/profiles.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/profiles.go
index 8c72d1a55103..fe4182176f86 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/profiles.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/profiles.go
@@ -201,17 +201,18 @@ func (client ProfilesClient) GetResponder(resp *http.Response) (result Profile,
// Parameters:
// billingAccountName - billing Account Id.
// expand - may be used to expand the invoiceSections.
-func (client ProfilesClient) ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string) (result ProfileListResult, err error) {
+func (client ProfilesClient) ListByBillingAccountName(ctx context.Context, billingAccountName string, expand string) (result ProfileListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ProfilesClient.ListByBillingAccountName")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.plr.Response.Response != nil {
+ sc = result.plr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByBillingAccountNameNextResults
req, err := client.ListByBillingAccountNamePreparer(ctx, billingAccountName, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.ProfilesClient", "ListByBillingAccountName", nil, "Failure preparing request")
@@ -220,12 +221,12 @@ func (client ProfilesClient) ListByBillingAccountName(ctx context.Context, billi
resp, err := client.ListByBillingAccountNameSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.plr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.ProfilesClient", "ListByBillingAccountName", resp, "Failure sending request")
return
}
- result, err = client.ListByBillingAccountNameResponder(resp)
+ result.plr, err = client.ListByBillingAccountNameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.ProfilesClient", "ListByBillingAccountName", resp, "Failure responding to request")
}
@@ -275,6 +276,43 @@ func (client ProfilesClient) ListByBillingAccountNameResponder(resp *http.Respon
return
}
+// listByBillingAccountNameNextResults retrieves the next set of results, if any.
+func (client ProfilesClient) listByBillingAccountNameNextResults(ctx context.Context, lastResults ProfileListResult) (result ProfileListResult, err error) {
+ req, err := lastResults.profileListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.ProfilesClient", "listByBillingAccountNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBillingAccountNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.ProfilesClient", "listByBillingAccountNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBillingAccountNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.ProfilesClient", "listByBillingAccountNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBillingAccountNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ProfilesClient) ListByBillingAccountNameComplete(ctx context.Context, billingAccountName string, expand string) (result ProfileListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ProfilesClient.ListByBillingAccountName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBillingAccountName(ctx, billingAccountName, expand)
+ return
+}
+
// Update the operation to update a billing profile.
// Parameters:
// billingAccountName - billing Account Id.
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/subscriptions.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/subscriptions.go
index b9bb84bb58fe..6bd533d78044 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/subscriptions.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/subscriptions.go
@@ -118,6 +118,84 @@ func (client SubscriptionsClient) GetResponder(resp *http.Response) (result Subs
return
}
+// GetByCustomerName gets a single billing subscription by name.
+// Parameters:
+// billingAccountName - billing Account Id.
+// customerName - customer Id.
+// billingSubscriptionName - billing Subscription Id.
+func (client SubscriptionsClient) GetByCustomerName(ctx context.Context, billingAccountName string, customerName string, billingSubscriptionName string) (result SubscriptionSummary, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.GetByCustomerName")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetByCustomerNamePreparer(ctx, billingAccountName, customerName, billingSubscriptionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "GetByCustomerName", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetByCustomerNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "GetByCustomerName", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetByCustomerNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "GetByCustomerName", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetByCustomerNamePreparer prepares the GetByCustomerName request.
+func (client SubscriptionsClient) GetByCustomerNamePreparer(ctx context.Context, billingAccountName string, customerName string, billingSubscriptionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "billingAccountName": autorest.Encode("path", billingAccountName),
+ "billingSubscriptionName": autorest.Encode("path", billingSubscriptionName),
+ "customerName": autorest.Encode("path", customerName),
+ }
+
+ const APIVersion = "2018-11-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/customers/{customerName}/billingSubscriptions/{billingSubscriptionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetByCustomerNameSender sends the GetByCustomerName request. The method will close the
+// http.Response Body if it receives an error.
+func (client SubscriptionsClient) GetByCustomerNameSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetByCustomerNameResponder handles the response to the GetByCustomerName request. The method always
+// closes the http.Response Body.
+func (client SubscriptionsClient) GetByCustomerNameResponder(resp *http.Response) (result SubscriptionSummary, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
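A hedged sketch of how the new GetByCustomerName operation might be consumed, assuming a SubscriptionsClient has already been constructed and authorized elsewhere; the parameter values are placeholders.

package examples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
)

// getCustomerSubscription fetches one billing subscription scoped to a customer.
func getCustomerSubscription(ctx context.Context, client billing.SubscriptionsClient) error {
	summary, err := client.GetByCustomerName(ctx, "<billingAccountName>", "<customerName>", "<billingSubscriptionName>")
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", summary) // summary is a billing.SubscriptionSummary
	return nil
}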
// ListByBillingAccountName lists billing subscriptions by billing account name.
// Parameters:
// billingAccountName - billing Account Id.
@@ -234,17 +312,18 @@ func (client SubscriptionsClient) ListByBillingAccountNameComplete(ctx context.C
// Parameters:
// billingAccountName - billing Account Id.
// billingProfileName - billing Profile Id.
-func (client SubscriptionsClient) ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string) (result SubscriptionsListResult, err error) {
+func (client SubscriptionsClient) ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string) (result SubscriptionsListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.ListByBillingProfileName")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.slr.Response.Response != nil {
+ sc = result.slr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByBillingProfileNameNextResults
req, err := client.ListByBillingProfileNamePreparer(ctx, billingAccountName, billingProfileName)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "ListByBillingProfileName", nil, "Failure preparing request")
@@ -253,12 +332,12 @@ func (client SubscriptionsClient) ListByBillingProfileName(ctx context.Context,
resp, err := client.ListByBillingProfileNameSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.slr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "ListByBillingProfileName", resp, "Failure sending request")
return
}
- result, err = client.ListByBillingProfileNameResponder(resp)
+ result.slr, err = client.ListByBillingProfileNameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "ListByBillingProfileName", resp, "Failure responding to request")
}
@@ -306,21 +385,173 @@ func (client SubscriptionsClient) ListByBillingProfileNameResponder(resp *http.R
return
}
+// listByBillingProfileNameNextResults retrieves the next set of results, if any.
+func (client SubscriptionsClient) listByBillingProfileNameNextResults(ctx context.Context, lastResults SubscriptionsListResult) (result SubscriptionsListResult, err error) {
+ req, err := lastResults.subscriptionsListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "listByBillingProfileNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBillingProfileNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "listByBillingProfileNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBillingProfileNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "listByBillingProfileNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBillingProfileNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client SubscriptionsClient) ListByBillingProfileNameComplete(ctx context.Context, billingAccountName string, billingProfileName string) (result SubscriptionsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.ListByBillingProfileName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBillingProfileName(ctx, billingAccountName, billingProfileName)
+ return
+}
+
+// ListByCustomerName lists billing subscriptions by customer name.
+// Parameters:
+// billingAccountName - billing Account Id.
+// customerName - customer Id.
+func (client SubscriptionsClient) ListByCustomerName(ctx context.Context, billingAccountName string, customerName string) (result SubscriptionsListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.ListByCustomerName")
+ defer func() {
+ sc := -1
+ if result.slr.Response.Response != nil {
+ sc = result.slr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByCustomerNameNextResults
+ req, err := client.ListByCustomerNamePreparer(ctx, billingAccountName, customerName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "ListByCustomerName", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByCustomerNameSender(req)
+ if err != nil {
+ result.slr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "ListByCustomerName", resp, "Failure sending request")
+ return
+ }
+
+ result.slr, err = client.ListByCustomerNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "ListByCustomerName", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByCustomerNamePreparer prepares the ListByCustomerName request.
+func (client SubscriptionsClient) ListByCustomerNamePreparer(ctx context.Context, billingAccountName string, customerName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "billingAccountName": autorest.Encode("path", billingAccountName),
+ "customerName": autorest.Encode("path", customerName),
+ }
+
+ const APIVersion = "2018-11-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/customers/{customerName}/billingSubscriptions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByCustomerNameSender sends the ListByCustomerName request. The method will close the
+// http.Response Body if it receives an error.
+func (client SubscriptionsClient) ListByCustomerNameSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByCustomerNameResponder handles the response to the ListByCustomerName request. The method always
+// closes the http.Response Body.
+func (client SubscriptionsClient) ListByCustomerNameResponder(resp *http.Response) (result SubscriptionsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByCustomerNameNextResults retrieves the next set of results, if any.
+func (client SubscriptionsClient) listByCustomerNameNextResults(ctx context.Context, lastResults SubscriptionsListResult) (result SubscriptionsListResult, err error) {
+ req, err := lastResults.subscriptionsListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "listByCustomerNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByCustomerNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "listByCustomerNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByCustomerNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "listByCustomerNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByCustomerNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client SubscriptionsClient) ListByCustomerNameComplete(ctx context.Context, billingAccountName string, customerName string) (result SubscriptionsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.ListByCustomerName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByCustomerName(ctx, billingAccountName, customerName)
+ return
+}
+
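The ListByCustomerNameComplete variant handles paging for the caller. A minimal sketch, again assuming a preconfigured SubscriptionsClient.

package examples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
)

// listCustomerSubscriptions walks every subscription under a customer,
// letting the iterator cross page boundaries automatically.
func listCustomerSubscriptions(ctx context.Context, client billing.SubscriptionsClient) error {
	iter, err := client.ListByCustomerNameComplete(ctx, "<billingAccountName>", "<customerName>")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}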
// ListByInvoiceSectionName lists billing subscription by invoice section name.
// Parameters:
// billingAccountName - billing Account Id.
// invoiceSectionName - invoiceSection Id.
-func (client SubscriptionsClient) ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string) (result SubscriptionsListResult, err error) {
+func (client SubscriptionsClient) ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string) (result SubscriptionsListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.ListByInvoiceSectionName")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.slr.Response.Response != nil {
+ sc = result.slr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByInvoiceSectionNameNextResults
req, err := client.ListByInvoiceSectionNamePreparer(ctx, billingAccountName, invoiceSectionName)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "ListByInvoiceSectionName", nil, "Failure preparing request")
@@ -329,12 +560,12 @@ func (client SubscriptionsClient) ListByInvoiceSectionName(ctx context.Context,
resp, err := client.ListByInvoiceSectionNameSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.slr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "ListByInvoiceSectionName", resp, "Failure sending request")
return
}
- result, err = client.ListByInvoiceSectionNameResponder(resp)
+ result.slr, err = client.ListByInvoiceSectionNameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "ListByInvoiceSectionName", resp, "Failure responding to request")
}
@@ -382,6 +613,43 @@ func (client SubscriptionsClient) ListByInvoiceSectionNameResponder(resp *http.R
return
}
+// listByInvoiceSectionNameNextResults retrieves the next set of results, if any.
+func (client SubscriptionsClient) listByInvoiceSectionNameNextResults(ctx context.Context, lastResults SubscriptionsListResult) (result SubscriptionsListResult, err error) {
+ req, err := lastResults.subscriptionsListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "listByInvoiceSectionNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByInvoiceSectionNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "listByInvoiceSectionNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByInvoiceSectionNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.SubscriptionsClient", "listByInvoiceSectionNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByInvoiceSectionNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client SubscriptionsClient) ListByInvoiceSectionNameComplete(ctx context.Context, billingAccountName string, invoiceSectionName string) (result SubscriptionsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.ListByInvoiceSectionName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByInvoiceSectionName(ctx, billingAccountName, invoiceSectionName)
+ return
+}
+
// Transfer transfers the subscription from one invoice section to another within a billing account.
// Parameters:
// billingAccountName - billing Account Id.
diff --git a/services/preview/billing/mgmt/2018-11-01-preview/billing/transactions.go b/services/preview/billing/mgmt/2018-11-01-preview/billing/transactions.go
index 9ad8dc62afb3..699fa5f89b5a 100644
--- a/services/preview/billing/mgmt/2018-11-01-preview/billing/transactions.go
+++ b/services/preview/billing/mgmt/2018-11-01-preview/billing/transactions.go
@@ -171,17 +171,18 @@ func (client TransactionsClient) ListByBillingAccountNameComplete(ctx context.Co
// filter - may be used to filter by transaction kind. The filter supports 'eq', 'lt', 'gt', 'le', 'ge', and
// 'and'. It does not currently support 'ne', 'or', or 'not'. Tag filter is a key value pair string where key
// and value is separated by a colon (:).
-func (client TransactionsClient) ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string, startDate string, endDate string, filter string) (result TransactionsListResult, err error) {
+func (client TransactionsClient) ListByBillingProfileName(ctx context.Context, billingAccountName string, billingProfileName string, startDate string, endDate string, filter string) (result TransactionsListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/TransactionsClient.ListByBillingProfileName")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.tlr.Response.Response != nil {
+ sc = result.tlr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByBillingProfileNameNextResults
req, err := client.ListByBillingProfileNamePreparer(ctx, billingAccountName, billingProfileName, startDate, endDate, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "ListByBillingProfileName", nil, "Failure preparing request")
@@ -190,12 +191,12 @@ func (client TransactionsClient) ListByBillingProfileName(ctx context.Context, b
resp, err := client.ListByBillingProfileNameSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.tlr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "ListByBillingProfileName", resp, "Failure sending request")
return
}
- result, err = client.ListByBillingProfileNameResponder(resp)
+ result.tlr, err = client.ListByBillingProfileNameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "ListByBillingProfileName", resp, "Failure responding to request")
}
@@ -248,6 +249,167 @@ func (client TransactionsClient) ListByBillingProfileNameResponder(resp *http.Re
return
}
+// listByBillingProfileNameNextResults retrieves the next set of results, if any.
+func (client TransactionsClient) listByBillingProfileNameNextResults(ctx context.Context, lastResults TransactionsListResult) (result TransactionsListResult, err error) {
+ req, err := lastResults.transactionsListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.TransactionsClient", "listByBillingProfileNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByBillingProfileNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.TransactionsClient", "listByBillingProfileNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByBillingProfileNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "listByBillingProfileNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByBillingProfileNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client TransactionsClient) ListByBillingProfileNameComplete(ctx context.Context, billingAccountName string, billingProfileName string, startDate string, endDate string, filter string) (result TransactionsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TransactionsClient.ListByBillingProfileName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByBillingProfileName(ctx, billingAccountName, billingProfileName, startDate, endDate, filter)
+ return
+}
+
+// ListByCustomerName lists the transactions by customer name for the given start date and end date.
+// Parameters:
+// billingAccountName - billing Account Id.
+// customerName - customer Id.
+// startDate - start date
+// endDate - end date
+// filter - may be used to filter by transaction kind. The filter supports 'eq', 'lt', 'gt', 'le', 'ge', and
+// 'and'. It does not currently support 'ne', 'or', or 'not'. Tag filter is a key value pair string where key
+// and value is separated by a colon (:).
+func (client TransactionsClient) ListByCustomerName(ctx context.Context, billingAccountName string, customerName string, startDate string, endDate string, filter string) (result TransactionsListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TransactionsClient.ListByCustomerName")
+ defer func() {
+ sc := -1
+ if result.tlr.Response.Response != nil {
+ sc = result.tlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByCustomerNameNextResults
+ req, err := client.ListByCustomerNamePreparer(ctx, billingAccountName, customerName, startDate, endDate, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "ListByCustomerName", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByCustomerNameSender(req)
+ if err != nil {
+ result.tlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "ListByCustomerName", resp, "Failure sending request")
+ return
+ }
+
+ result.tlr, err = client.ListByCustomerNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "ListByCustomerName", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByCustomerNamePreparer prepares the ListByCustomerName request.
+func (client TransactionsClient) ListByCustomerNamePreparer(ctx context.Context, billingAccountName string, customerName string, startDate string, endDate string, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "billingAccountName": autorest.Encode("path", billingAccountName),
+ "customerName": autorest.Encode("path", customerName),
+ }
+
+ const APIVersion = "2018-11-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ "endDate": autorest.Encode("query", endDate),
+ "startDate": autorest.Encode("query", startDate),
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/customers/{customerName}/transactions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByCustomerNameSender sends the ListByCustomerName request. The method will close the
+// http.Response Body if it receives an error.
+func (client TransactionsClient) ListByCustomerNameSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByCustomerNameResponder handles the response to the ListByCustomerName request. The method always
+// closes the http.Response Body.
+func (client TransactionsClient) ListByCustomerNameResponder(resp *http.Response) (result TransactionsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByCustomerNameNextResults retrieves the next set of results, if any.
+func (client TransactionsClient) listByCustomerNameNextResults(ctx context.Context, lastResults TransactionsListResult) (result TransactionsListResult, err error) {
+ req, err := lastResults.transactionsListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.TransactionsClient", "listByCustomerNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByCustomerNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.TransactionsClient", "listByCustomerNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByCustomerNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "listByCustomerNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByCustomerNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client TransactionsClient) ListByCustomerNameComplete(ctx context.Context, billingAccountName string, customerName string, startDate string, endDate string, filter string) (result TransactionsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TransactionsClient.ListByCustomerName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByCustomerName(ctx, billingAccountName, customerName, startDate, endDate, filter)
+ return
+}
+
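A rough sketch of driving the new customer-scoped transactions listing; the date format shown (YYYY-MM-DD) and the preconfigured TransactionsClient are assumptions, not something this diff establishes.

package examples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/billing/mgmt/2018-11-01-preview/billing"
)

// listCustomerTransactions enumerates all transactions for a customer within a
// date window, crossing page boundaries via the Complete iterator.
func listCustomerTransactions(ctx context.Context, client billing.TransactionsClient) error {
	iter, err := client.ListByCustomerNameComplete(ctx,
		"<billingAccountName>", "<customerName>",
		"2019-01-01", "2019-06-30", // assumed YYYY-MM-DD start/end dates
		"") // empty filter: return all transaction kinds
	if err != nil {
		return err
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}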
// ListByInvoiceSectionName lists the transactions by invoice section name for given start date and end date.
// Parameters:
// billingAccountName - billing Account Id.
@@ -257,17 +419,18 @@ func (client TransactionsClient) ListByBillingProfileNameResponder(resp *http.Re
// filter - may be used to filter by transaction kind. The filter supports 'eq', 'lt', 'gt', 'le', 'ge', and
// 'and'. It does not currently support 'ne', 'or', or 'not'. Tag filter is a key value pair string where key
// and value is separated by a colon (:).
-func (client TransactionsClient) ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string, startDate string, endDate string, filter string) (result TransactionsListResult, err error) {
+func (client TransactionsClient) ListByInvoiceSectionName(ctx context.Context, billingAccountName string, invoiceSectionName string, startDate string, endDate string, filter string) (result TransactionsListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/TransactionsClient.ListByInvoiceSectionName")
defer func() {
sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
+ if result.tlr.Response.Response != nil {
+ sc = result.tlr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
+ result.fn = client.listByInvoiceSectionNameNextResults
req, err := client.ListByInvoiceSectionNamePreparer(ctx, billingAccountName, invoiceSectionName, startDate, endDate, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "ListByInvoiceSectionName", nil, "Failure preparing request")
@@ -276,12 +439,12 @@ func (client TransactionsClient) ListByInvoiceSectionName(ctx context.Context, b
resp, err := client.ListByInvoiceSectionNameSender(req)
if err != nil {
- result.Response = autorest.Response{Response: resp}
+ result.tlr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "ListByInvoiceSectionName", resp, "Failure sending request")
return
}
- result, err = client.ListByInvoiceSectionNameResponder(resp)
+ result.tlr, err = client.ListByInvoiceSectionNameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "ListByInvoiceSectionName", resp, "Failure responding to request")
}
@@ -333,3 +496,40 @@ func (client TransactionsClient) ListByInvoiceSectionNameResponder(resp *http.Re
result.Response = autorest.Response{Response: resp}
return
}
+
+// listByInvoiceSectionNameNextResults retrieves the next set of results, if any.
+func (client TransactionsClient) listByInvoiceSectionNameNextResults(ctx context.Context, lastResults TransactionsListResult) (result TransactionsListResult, err error) {
+ req, err := lastResults.transactionsListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "billing.TransactionsClient", "listByInvoiceSectionNameNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByInvoiceSectionNameSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "billing.TransactionsClient", "listByInvoiceSectionNameNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByInvoiceSectionNameResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "billing.TransactionsClient", "listByInvoiceSectionNameNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByInvoiceSectionNameComplete enumerates all values, automatically crossing page boundaries as required.
+func (client TransactionsClient) ListByInvoiceSectionNameComplete(ctx context.Context, billingAccountName string, invoiceSectionName string, startDate string, endDate string, filter string) (result TransactionsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TransactionsClient.ListByInvoiceSectionName")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByInvoiceSectionName(ctx, billingAccountName, invoiceSectionName, startDate, endDate, filter)
+ return
+}
diff --git a/services/preview/cognitiveservices/v1.0/inkrecognizer/models.go b/services/preview/cognitiveservices/v1.0/inkrecognizer/models.go
index f200f311b249..c7aec42aab70 100644
--- a/services/preview/cognitiveservices/v1.0/inkrecognizer/models.go
+++ b/services/preview/cognitiveservices/v1.0/inkrecognizer/models.go
@@ -53,10 +53,10 @@ const (
InkWord Category = "inkWord"
// Line ...
Line Category = "line"
- // ListItem ...
- ListItem Category = "listItem"
// Paragraph ...
Paragraph Category = "paragraph"
+ // Root ...
+ Root Category = "root"
// Unknown ...
Unknown Category = "unknown"
// WritingRegion ...
@@ -65,7 +65,7 @@ const (
// PossibleCategoryValues returns an array of possible values for the Category const type.
func PossibleCategoryValues() []Category {
- return []Category{InkBullet, InkDrawing, InkWord, Line, ListItem, Paragraph, Unknown, WritingRegion}
+ return []Category{InkBullet, InkDrawing, InkWord, Line, Paragraph, Root, Unknown, WritingRegion}
}
// Class enumerates the values for class.
@@ -89,17 +89,48 @@ type Container string
const (
// ContainerLine ...
ContainerLine Container = "line"
- // ContainerListItem ...
- ContainerListItem Container = "listItem"
// ContainerParagraph ...
ContainerParagraph Container = "paragraph"
+ // ContainerRoot ...
+ ContainerRoot Container = "root"
// ContainerWritingRegion ...
ContainerWritingRegion Container = "writingRegion"
)
// PossibleContainerValues returns an array of possible values for the Container const type.
func PossibleContainerValues() []Container {
- return []Container{ContainerLine, ContainerListItem, ContainerParagraph, ContainerWritingRegion}
+ return []Container{ContainerLine, ContainerParagraph, ContainerRoot, ContainerWritingRegion}
+}
+
+// InputDevice enumerates the values for input device.
+type InputDevice string
+
+const (
+ // Armature ...
+ Armature InputDevice = "armature"
+ // ArticulatedArm ...
+ ArticulatedArm InputDevice = "articulatedArm"
+ // Digitizer ...
+ Digitizer InputDevice = "digitizer"
+ // LightPen ...
+ LightPen InputDevice = "lightPen"
+ // Pen ...
+ Pen InputDevice = "pen"
+ // StereoPlotter ...
+ StereoPlotter InputDevice = "stereoPlotter"
+ // ThreedDigitizer ...
+ ThreedDigitizer InputDevice = "3dDigitizer"
+ // TouchPad ...
+ TouchPad InputDevice = "touchPad"
+ // TouchScreen ...
+ TouchScreen InputDevice = "touchScreen"
+ // WhiteBoard ...
+ WhiteBoard InputDevice = "whiteBoard"
+)
+
+// PossibleInputDeviceValues returns an array of possible values for the InputDevice const type.
+func PossibleInputDeviceValues() []InputDevice {
+ return []InputDevice{Armature, ArticulatedArm, Digitizer, LightPen, Pen, StereoPlotter, ThreedDigitizer, TouchPad, TouchScreen, WhiteBoard}
}
// Kind enumerates the values for kind.
@@ -242,23 +273,6 @@ func PossibleUnitValues() []Unit {
return []Unit{Cm, In, Mm}
}
-// Unit1 enumerates the values for unit 1.
-type Unit1 string
-
-const (
- // Unit1Cm ...
- Unit1Cm Unit1 = "cm"
- // Unit1In ...
- Unit1In Unit1 = "in"
- // Unit1Mm ...
- Unit1Mm Unit1 = "mm"
-)
-
-// PossibleUnit1Values returns an array of possible values for the Unit1 const type.
-func PossibleUnit1Values() []Unit1 {
- return []Unit1{Unit1Cm, Unit1In, Unit1Mm}
-}
-
// AlternatePatternItem ...
type AlternatePatternItem struct {
// Category - Possible values include: 'LeafInkDrawing', 'LeafInkBullet', 'LeafInkWord', 'LeafUnknown'
@@ -277,12 +291,15 @@ type AlternatePatternItem struct {
type AnalysisRequest struct {
// ApplicationType - This describes the domain of the client application. Possible values include: 'Drawing', 'Writing', 'Mixed'
ApplicationType Application `json:"applicationType,omitempty"`
- // Unit - This represents the physical units of the ink strokes. It is up to the application developer to decide how to convert the device specific units to physical units before calling the service. The conversion factor can be different based on the type of the device used. Possible values include: 'Unit1Mm', 'Unit1Cm', 'Unit1In'
- Unit Unit1 `json:"unit,omitempty"`
+ // InputDeviceKind - This identifies the kind of device used as the writing instrument. Possible values include: 'Digitizer', 'Pen', 'LightPen', 'TouchScreen', 'TouchPad', 'WhiteBoard', 'ThreedDigitizer', 'StereoPlotter', 'ArticulatedArm', 'Armature'
+ InputDeviceKind InputDevice `json:"inputDeviceKind,omitempty"`
+ // Unit - This is the physical unit of the ink strokes. It is up to the application developer to decide how to convert the device specific units to physical units before calling the service. The conversion factor can be different based on the type of the device used. Possible values include: 'Mm', 'Cm', 'In'
+ Unit Unit `json:"unit,omitempty"`
// UnitMultiple - This is a scaling factor to be applied to the point coordinates when interpreting them in the physical units specified.
UnitMultiple *float64 `json:"unitMultiple,omitempty"`
// Language - The IETF BCP 47 language code (for ex. en-US, en-GB, hi-IN etc.) of the expected language for the handwritten content in the ink strokes. The response will include results from this language.
- Language *string `json:"language,omitempty"`
+ Language *string `json:"language,omitempty"`
+ InkPointValueAttributes *[]InkPointValueAttribute `json:"inkPointValueAttributes,omitempty"`
// Strokes - This is the array of strokes sent for recognition. Best results are produced when the order of strokes added in the array matches the order in which the user created them. Changing the stroke order may produce unexpected results.
Strokes *[]Stroke `json:"strokes,omitempty"`
}
@@ -290,11 +307,11 @@ type AnalysisRequest struct {
// AnalysisResponse this shows the expected contents of a response from the service
type AnalysisResponse struct {
autorest.Response `json:"-"`
- // Unit - This represents the physical units of the ink strokes. It is up to the application developer to decide how to convert the device specific units to physical units before calling the service. The conversion factor can be different based on the type of the device used. Possible values include: 'Mm', 'Cm', 'In'
+ // Unit - This is the physical unit of the ink strokes. It is up to the application developer to decide how to convert the device specific units to physical units before calling the service. The conversion factor can be different based on the type of the device used. Possible values include: 'Mm', 'Cm', 'In'
Unit Unit `json:"unit,omitempty"`
// UnitMultiple - This is a scaling factor to be applied to the point coordinates when interpreting them in the physical units specified.
UnitMultiple *float64 `json:"unitMultiple,omitempty"`
- // Language - This is the language used for recognizing handwriting from the ink strokes in the request. Set this to the user’s preferred language.
+ // Language - This is the language used for recognizing handwriting from the ink strokes in the request.
Language *string `json:"language,omitempty"`
RecognitionUnits *[]RecognitionUnitItem `json:"recognitionUnits,omitempty"`
}
@@ -351,6 +368,53 @@ type ErrorModelDetailsItem struct {
Target *string `json:"target,omitempty"`
}
+// InkPoint an object containing the properties of a point in the path of an ink stroke. The main
+// properties are the x and y values. Others include tip pressure, x tilt, etc. For the coordinate values,
+// it is recommended to have a precision of 8 digits after the decimal to obtain the most accurate
+// recognition results. The origin (0,0) of the canvas is assumed to be at the top left corner of the canvas.
+type InkPoint struct {
+ // X - The x coordinate of the pen location on the writing surface.
+ X *float64 `json:"x,omitempty"`
+ // Y - The y coordinate of the pen location on the writing surface.
+ Y *float64 `json:"y,omitempty"`
+ // Z - The z coordinate of the pen location on the writing space. This may not be used for recognition.
+ Z *float64 `json:"z,omitempty"`
+ // TipPressure - The force exerted against the tablet surface by the transducer, typically a stylus. This may not be used for recognition.
+ TipPressure *float64 `json:"tipPressure,omitempty"`
+ // BarrelPressure - The force exerted directly by the user on a transducer sensor, such as a pressure-sensitive button on the barrel of a stylus. This may not be used for recognition.
+ BarrelPressure *float64 `json:"barrelPressure,omitempty"`
+ // Timestamp - The time relative to the absolute time the transducer last became active. This may not be used for recognition.
+ Timestamp *float64 `json:"timestamp,omitempty"`
+ // XTilt - The plane angle between the Y-Z plane and the plane containing the transducer axis and the Y axis. This may not be used for recognition.
+ XTilt *float64 `json:"xTilt,omitempty"`
+ // YTilt - The angle between the X-Z and transducer-X planes. A positive Y Tilt is toward the user. This may not be used for recognition.
+ YTilt *float64 `json:"yTilt,omitempty"`
+ // Width - The width of the tip of the writing instrument. This is used by touch screen devices to report the width of the finger contact on the writing surface. This may not be used for recognition.
+ Width *float64 `json:"width,omitempty"`
+ // Height - The height of the tip of the writing instrument. This is used by touch screen devices to report the height of the finger contact on the writing surface. This may not be used for recognition.
+ Height *float64 `json:"height,omitempty"`
+ // TipSwitch - A switch located on the tip of a stylus indicating contact of the stylus with a surface. This may not be used for recognition.
+ TipSwitch *bool `json:"tipSwitch,omitempty"`
+ // Inverted - A value that indicates that the currently sensed position originates from the end of a stylus opposite the tip switch. This may not be used for recognition.
+ Inverted *bool `json:"inverted,omitempty"`
+ // BarrelSwitch - A non-tip button located on the barrel of a stylus. Its function is typically mapped to a system secondary button. This may not be used for recognition.
+ BarrelSwitch *bool `json:"barrelSwitch,omitempty"`
+ // Eraser - The control is used for erasing objects. It is typically located opposite the writing end of a stylus. This may not be used for recognition.
+ Eraser *bool `json:"eraser,omitempty"`
+ // SecondaryTip - A secondary switch used in conjunction with the tip switch to indicate pressure above a certain threshold applied with the stylus. This may not be used for recognition.
+ SecondaryTip *bool `json:"secondaryTip,omitempty"`
+}
+
+// InkPointValueAttribute a container for the attributes of a value contained in the ink point object.
+type InkPointValueAttribute struct {
+ // Name - The name of the point attribute.
+ Name *string `json:"name,omitempty"`
+ // LogicalMinimum - The minimum value for the attribute
+ LogicalMinimum *float64 `json:"logicalMinimum,omitempty"`
+ // LogicalMaximum - The maximum value for the attribute
+ LogicalMaximum *float64 `json:"logicalMaximum,omitempty"`
+}
+
// PointDetailsPattern this holds all the properties of one point
type PointDetailsPattern struct {
// X - This represents the x coordinate of the point
@@ -359,11 +423,11 @@ type PointDetailsPattern struct {
Y *float64 `json:"y,omitempty"`
}
-// RecognitionUnitItem this represents the recognized entity
+// RecognitionUnitItem this identifies the recognized entity
type RecognitionUnitItem struct {
// ID - The identifier of the recognition unit. This id is used to indicate parent/child relationship between different recognition units.
ID *int32 `json:"id,omitempty"`
- // Category - Possible values include: 'WritingRegion', 'Paragraph', 'ListItem', 'Line', 'InkBullet', 'InkDrawing', 'InkWord', 'Unknown'
+ // Category - Possible values include: 'Root', 'WritingRegion', 'Paragraph', 'Line', 'InkBullet', 'InkDrawing', 'InkWord', 'Unknown'
Category Category `json:"category,omitempty"`
Alternates *[]AlternatePatternItem `json:"alternates,omitempty"`
// Center - The coordinates (x,y) of the center of the recognition unit.
@@ -376,34 +440,34 @@ type RecognitionUnitItem struct {
Class Class `json:"class,omitempty"`
// ParentID - The id of the parent node in the tree structure of the recognition results. parent = 0 indicates that there is no dedicated parent node for this unit.
ParentID *int32 `json:"parentId,omitempty"`
- // BoundingRectangle - The bounding rectangle of the recognized unit represented by the coordinates of the top left corner (x,y) along with width (w) and height (h) of the rectangle. Note that this rectangle is not rotated. So for rotated objects such as slanted handwriting, it will cover the entire object. The unit will be matched to the one specified in the original request (mm by default.)
+ // BoundingRectangle - The bounding rectangle of the recognition unit represented by the coordinates of the top left corner (topX,topY) along with width and height of the rectangle. Note that this rectangle is not rotated. So for rotated objects such as slanted handwriting, it will cover the entire object. The unit will be matched to the one specified in the original request (mm by default.)
BoundingRectangle *RecognitionUnitItemBoundingRectangle `json:"boundingRectangle,omitempty"`
- // RotatedBoundingRectangle - This property provides the rotated bounding rectangle that covers the entire recognized object along the angle of rotation of the object. Note that this is NOT the same as rotating the boundingRectangle by the rotation angle.
+ // RotatedBoundingRectangle - This is the rotated bounding rectangle that covers the entire recognized object along the angle of rotation of the object. Note that this is NOT the same as rotating the boundingRectangle by the rotation angle.
RotatedBoundingRectangle *[]PointDetailsPattern `json:"rotatedBoundingRectangle,omitempty"`
- // StrokeIds - This is an array of integers representing the list of stroke Identifier integers from the input request body that belong to this recognition unit.
+ // StrokeIds - This is an array of integers representing the list of stroke Identifiers from the input request body that belong to this recognition unit.
StrokeIds *[]int32 `json:"strokeIds,omitempty"`
- // RecognizedText - The string represents the text that was recognized. It can be an empty string if the recognizer cannot determine the text.
+ // RecognizedText - The string contains the text that was recognized. It can be an empty string if the recognizer cannot determine the text.
RecognizedText *string `json:"recognizedText,omitempty"`
- // Confidence - The class represents the type of the recognition unit. A recognition unit can be a leaf node or a container node. Container nodes typically have leaf nodes as children.
+ // Confidence - A number between 0 and 1 which indicates the confidence level in the result.
Confidence *float64 `json:"confidence,omitempty"`
- // RotationAngle - This represents the angle at which the unit is rotated in degrees with respect to the positive X axis.
+ // RotationAngle - This is the angle at which the unit is rotated in degrees with respect to the positive X axis.
RotationAngle *float64 `json:"rotationAngle,omitempty"`
// RecognizedObject - Possible values include: 'ShapeDrawing', 'ShapeSquare', 'ShapeRectangle', 'ShapeCircle', 'ShapeEllipse', 'ShapeTriangle', 'ShapeIsoscelesTriangle', 'ShapeEquilateralTriangle', 'ShapeRightTriangle', 'ShapeQuadrilateral', 'ShapeDiamond', 'ShapeTrapezoid', 'ShapeParallelogram', 'ShapePentagon', 'ShapeHexagon', 'ShapeBlockArrow', 'ShapeHeart', 'ShapeStarSimple', 'ShapeStarCrossed', 'ShapeCloud', 'ShapeLine', 'ShapeCurve', 'ShapePolyLine'
RecognizedObject Shape `json:"recognizedObject,omitempty"`
}
-// RecognitionUnitItemBoundingRectangle the bounding rectangle of the recognized unit represented by the
-// coordinates of the top left corner (x,y) along with width (w) and height (h) of the rectangle. Note that
+// RecognitionUnitItemBoundingRectangle the bounding rectangle of the recognition unit represented by the
+// coordinates of the top left corner (topX,topY) along with width and height of the rectangle. Note that
// this rectangle is not rotated. So for rotated objects such as slanted handwriting, it will cover the
// entire object. The unit will be matched to the one specified in the original request (mm by default.)
type RecognitionUnitItemBoundingRectangle struct {
- // TopX - This represents the top left x coordinate
+ // TopX - This is the top left x coordinate
TopX *float64 `json:"topX,omitempty"`
- // TopY - This represents the top left y coordinate
+ // TopY - This is the top left y coordinate
TopY *float64 `json:"topY,omitempty"`
- // Width - This represents width of the bounding rectangle
+ // Width - This is the width of the bounding rectangle
Width *float64 `json:"width,omitempty"`
- // Height - The represents the height of the bounding rectangle
+ // Height - This is the height of the bounding rectangle
Height *float64 `json:"height,omitempty"`
}
@@ -412,9 +476,8 @@ type Stroke struct {
// ID - This is treated as a unique identifier for each stroke within a request. If the id is repeated within the same request, the service will return an error.
ID *int32 `json:"id,omitempty"`
// Language - The IETF BCP 47 language code (for ex. en-US, en-GB, hi-IN etc.) of the expected language for the handwritten content in this stroke. The response will include results from this language.
- Language *string `json:"language,omitempty"`
- // Points - A string of comma separated floating point values that represent the x and y coordinates of points that are part of the stroke. (X1,Y1, X2,Y2…). It is recommended to have a precision of 8 digits after the decimal to obtain most accurate recognition results. The origin (0,0) of the canvas is assumed to be at the top left corner of the canvas
- Points *string `json:"points,omitempty"`
+ Language *string `json:"language,omitempty"`
+ Points *[]InkPoint `json:"points,omitempty"`
DrawingAttributes *DrawingAttributesPattern `json:"drawingAttributes,omitempty"`
// Kind - This is an optional property which influences the decision about what the stroke kind is between inkWriting and inkDrawing. This property should be set ONLY if the type of user content is known ahead of time. Not setting this value implies the kind is not known ahead of time. Kind represents the type of content the stroke is a part of. Possible values include: 'KindInkDrawing', 'KindInkWriting'
Kind Kind `json:"kind,omitempty"`
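The hunks above replace the comma-separated `Points` string on `Stroke` with structured `InkPoint` values and add `InputDeviceKind` to `AnalysisRequest`. The following is a minimal sketch of building a request against the updated models; the import path and the `Mixed` constant name are assumptions, while `Pen`, `Mm`, and `KindInkWriting` appear in the hunks above.

package main

import (
	"fmt"

	// Import path assumed for the inkrecognizer preview package in this SDK.
	"github.com/Azure/azure-sdk-for-go/services/preview/cognitiveservices/v1.0/inkrecognizer"
)

func main() {
	// The generated models use pointer fields, so take addresses of locals.
	x, y := 0.12345678, 0.87654321
	strokeID := int32(1)
	lang := "en-US"

	req := inkrecognizer.AnalysisRequest{
		ApplicationType: inkrecognizer.Mixed, // assumed constant name for the 'Mixed' value
		InputDeviceKind: inkrecognizer.Pen,
		Unit:            inkrecognizer.Mm,
		Language:        &lang,
		Strokes: &[]inkrecognizer.Stroke{{
			ID:       &strokeID,
			Language: &lang,
			// Points is now a slice of InkPoint structs rather than a
			// comma-separated coordinate string.
			Points: &[]inkrecognizer.InkPoint{{X: &x, Y: &y}},
			Kind:   inkrecognizer.KindInkWriting,
		}},
	}
	fmt.Printf("request has %d stroke(s)\n", len(*req.Strokes))
}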
diff --git a/services/preview/containerregistry/runtime/2019-07/containerregistry/client.go b/services/preview/containerregistry/runtime/2019-07/containerregistry/client.go
new file mode 100644
index 000000000000..eae158753249
--- /dev/null
+++ b/services/preview/containerregistry/runtime/2019-07/containerregistry/client.go
@@ -0,0 +1,1598 @@
+// Package containerregistry implements the Azure ARM Containerregistry service API version 2019-07-15-preview.
+//
+// Metadata API definition for the Azure Container Registry runtime
+package containerregistry
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// BaseClient is the base client for Containerregistry.
+type BaseClient struct {
+ autorest.Client
+ LoginURI string
+}
+
+// New creates an instance of the BaseClient client.
+func New(loginURI string) BaseClient {
+ return NewWithoutDefaults(loginURI)
+}
+
+// NewWithoutDefaults creates an instance of the BaseClient client.
+func NewWithoutDefaults(loginURI string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ LoginURI: loginURI,
+ }
+}
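A minimal sketch of constructing the runtime client defined above and probing the registry; the registry endpoint is a placeholder, and real calls would also set an Authorizer on the embedded autorest.Client.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry"
)

func main() {
	// LoginURI is the registry endpoint; "myregistry" is a placeholder name.
	client := containerregistry.New("https://myregistry.azurecr.io")

	// GetDockerRegistryV2Support (defined later in this file) issues GET {url}/v2/
	// and returns without error when the registry supports the Docker Registry
	// HTTP API v2 and the caller is authorized.
	if _, err := client.GetDockerRegistryV2Support(context.Background()); err != nil {
		fmt.Println("v2 check failed:", err)
		return
	}
	fmt.Println("registry supports the v2 API")
}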
+
+// CreateManifest put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.
+// Parameters:
+// name - name of the image (including the namespace)
+// reference - a tag or a digest, pointing to a specific image
+// payload - manifest body, can take v1 or v2 values depending on accept header
+func (client BaseClient) CreateManifest(ctx context.Context, name string, reference string, payload Manifest) (result SetObject, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CreateManifest")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateManifestPreparer(ctx, name, reference, payload)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "CreateManifest", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateManifestSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "CreateManifest", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateManifestResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "CreateManifest", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateManifestPreparer prepares the CreateManifest request.
+func (client BaseClient) CreateManifestPreparer(ctx context.Context, name string, reference string, payload Manifest) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ "reference": autorest.Encode("path", reference),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/vnd.docker.distribution.manifest.v2+json"),
+ autorest.AsPut(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/v2/{name}/manifests/{reference}", pathParameters),
+ autorest.WithJSON(payload))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateManifestSender sends the CreateManifest request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) CreateManifestSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateManifestResponder handles the response to the CreateManifest request. The method always
+// closes the http.Response Body.
+func (client BaseClient) CreateManifestResponder(resp *http.Response) (result SetObject, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result.Value),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
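A hedged sketch of calling CreateManifest; the Manifest payload is left as its zero value because its fields live in this package's models (not shown in this file), and the repository and tag names are placeholders. It assumes the context and containerregistry imports shown above.

// pushManifest sketches a manifest PUT; client is an authenticated BaseClient.
func pushManifest(ctx context.Context, client containerregistry.BaseClient) error {
	var m containerregistry.Manifest // placeholder; populate from the models package
	// The payload is sent as application/vnd.docker.distribution.manifest.v2+json
	// to PUT {url}/v2/{name}/manifests/{reference}.
	_, err := client.CreateManifest(ctx, "samples/hello-world", "v1", m)
	return err
}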
+
+// DeleteAcrRepository delete the repository identified by `name`
+// Parameters:
+// name - name of the image (including the namespace)
+func (client BaseClient) DeleteAcrRepository(ctx context.Context, name string) (result DeletedRepository, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteAcrRepository")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeleteAcrRepositoryPreparer(ctx, name)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "DeleteAcrRepository", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteAcrRepositorySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "DeleteAcrRepository", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteAcrRepositoryResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "DeleteAcrRepository", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeleteAcrRepositoryPreparer prepares the DeleteAcrRepository request.
+func (client BaseClient) DeleteAcrRepositoryPreparer(ctx context.Context, name string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/acr/v1/{name}", pathParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteAcrRepositorySender sends the DeleteAcrRepository request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) DeleteAcrRepositorySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteAcrRepositoryResponder handles the response to the DeleteAcrRepository request. The method always
+// closes the http.Response Body.
+func (client BaseClient) DeleteAcrRepositoryResponder(resp *http.Response) (result DeletedRepository, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// DeleteAcrTag delete tag
+// Parameters:
+// name - name of the image (including the namespace)
+// reference - tag or digest of the target manifest
+func (client BaseClient) DeleteAcrTag(ctx context.Context, name string, reference string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteAcrTag")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeleteAcrTagPreparer(ctx, name, reference)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "DeleteAcrTag", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteAcrTagSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "DeleteAcrTag", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteAcrTagResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "DeleteAcrTag", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeleteAcrTagPreparer prepares the DeleteAcrTag request.
+func (client BaseClient) DeleteAcrTagPreparer(ctx context.Context, name string, reference string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ "reference": autorest.Encode("path", reference),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/acr/v1/{name}/_tags/{reference}", pathParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteAcrTagSender sends the DeleteAcrTag request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) DeleteAcrTagSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteAcrTagResponder handles the response to the DeleteAcrTag request. The method always
+// closes the http.Response Body.
+func (client BaseClient) DeleteAcrTagResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// DeleteManifest delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted
+// by `digest`.
+// Parameters:
+// name - name of the image (including the namespace)
+// reference - a tag or a digest, pointing to a specific image
+func (client BaseClient) DeleteManifest(ctx context.Context, name string, reference string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteManifest")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeleteManifestPreparer(ctx, name, reference)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "DeleteManifest", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteManifestSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "DeleteManifest", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteManifestResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "DeleteManifest", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeleteManifestPreparer prepares the DeleteManifest request.
+func (client BaseClient) DeleteManifestPreparer(ctx context.Context, name string, reference string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ "reference": autorest.Encode("path", reference),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/v2/{name}/manifests/{reference}", pathParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteManifestSender sends the DeleteManifest request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) DeleteManifestSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteManifestResponder handles the response to the DeleteManifest request. The method always
+// closes the http.Response Body.
+func (client BaseClient) DeleteManifestResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
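The delete operations above differ in scope: DeleteAcrTag removes only the tag reference, DeleteManifest removes the manifest itself and (per its comment) accepts only a digest, and DeleteAcrRepository removes the whole repository. A hedged sketch, with placeholder repository, tag, and digest values and the same imports as above:

// deleteExamples sketches the three delete calls; client is an authenticated BaseClient.
func deleteExamples(ctx context.Context, client containerregistry.BaseClient) error {
	// Untag only: the underlying manifest stays in the registry.
	if _, err := client.DeleteAcrTag(ctx, "samples/hello-world", "v1"); err != nil {
		return err
	}
	// Delete the manifest itself: the reference must be a digest, not a tag.
	digest := "sha256:<manifest-digest>" // placeholder
	if _, err := client.DeleteManifest(ctx, "samples/hello-world", digest); err != nil {
		return err
	}
	// Delete the whole repository; the response is a DeletedRepository model.
	_, err := client.DeleteAcrRepository(ctx, "samples/hello-world")
	return err
}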
+
+// GetAcrAccessToken exchange ACR Refresh token for an ACR Access Token
+// Parameters:
+// service - indicates the name of your Azure container registry.
+// scope - expected to be a valid scope, and can be specified more than once for multiple scope
+// requests. You can obtain this from the Www-Authenticate response header from the challenge.
+// refreshToken - must be a valid ACR refresh token
+func (client BaseClient) GetAcrAccessToken(ctx context.Context, service string, scope string, refreshToken string) (result AccessToken, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetAcrAccessToken")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAcrAccessTokenPreparer(ctx, service, scope, refreshToken)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrAccessToken", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAcrAccessTokenSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrAccessToken", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAcrAccessTokenResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrAccessToken", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAcrAccessTokenPreparer prepares the GetAcrAccessToken request.
+func (client BaseClient) GetAcrAccessTokenPreparer(ctx context.Context, service string, scope string, refreshToken string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ formDataParameters := map[string]interface{}{
+ "grant_type": "refresh_token",
+ "refresh_token": refreshToken,
+ "scope": scope,
+ "service": service,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPath("/oauth2/token"),
+ autorest.WithFormData(autorest.MapToValues(formDataParameters)))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAcrAccessTokenSender sends the GetAcrAccessToken request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetAcrAccessTokenSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAcrAccessTokenResponder handles the response to the GetAcrAccessToken request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetAcrAccessTokenResponder(resp *http.Response) (result AccessToken, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAcrAccessTokenFromLogin exchange Username, Password and Scope for an ACR Access Token
+// Parameters:
+// service - indicates the name of your Azure container registry.
+// scope - expected to be a valid scope, and can be specified more than once for multiple scope requests. You
+// can obtain this from the Www-Authenticate response header from the challenge.
+func (client BaseClient) GetAcrAccessTokenFromLogin(ctx context.Context, service string, scope string) (result AccessToken, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetAcrAccessTokenFromLogin")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAcrAccessTokenFromLoginPreparer(ctx, service, scope)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrAccessTokenFromLogin", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAcrAccessTokenFromLoginSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrAccessTokenFromLogin", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAcrAccessTokenFromLoginResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrAccessTokenFromLogin", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAcrAccessTokenFromLoginPreparer prepares the GetAcrAccessTokenFromLogin request.
+func (client BaseClient) GetAcrAccessTokenFromLoginPreparer(ctx context.Context, service string, scope string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ queryParameters := map[string]interface{}{
+ "scope": autorest.Encode("query", scope),
+ "service": autorest.Encode("query", service),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPath("/oauth2/token"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAcrAccessTokenFromLoginSender sends the GetAcrAccessTokenFromLogin request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetAcrAccessTokenFromLoginSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAcrAccessTokenFromLoginResponder handles the response to the GetAcrAccessTokenFromLogin request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetAcrAccessTokenFromLoginResponder(resp *http.Response) (result AccessToken, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAcrManifestAttributes get manifest attributes
+// Parameters:
+// name - name of the image (including the namespace)
+// reference - a tag or a digest, pointing to a specific image
+func (client BaseClient) GetAcrManifestAttributes(ctx context.Context, name string, reference string) (result AcrManifestAttributes, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetAcrManifestAttributes")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAcrManifestAttributesPreparer(ctx, name, reference)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrManifestAttributes", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAcrManifestAttributesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrManifestAttributes", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAcrManifestAttributesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrManifestAttributes", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAcrManifestAttributesPreparer prepares the GetAcrManifestAttributes request.
+func (client BaseClient) GetAcrManifestAttributesPreparer(ctx context.Context, name string, reference string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ "reference": autorest.Encode("path", reference),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/acr/v1/{name}/_manifests/{reference}", pathParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAcrManifestAttributesSender sends the GetAcrManifestAttributes request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetAcrManifestAttributesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAcrManifestAttributesResponder handles the response to the GetAcrManifestAttributes request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetAcrManifestAttributesResponder(resp *http.Response) (result AcrManifestAttributes, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAcrManifests list manifests of a repository
+// Parameters:
+// name - name of the image (including the namespace)
+// last - query parameter for the last item in the previous query. Result set will include values lexically after
+// last.
+// n - query parameter for max number of items
+// orderby - orderby query parameter
+func (client BaseClient) GetAcrManifests(ctx context.Context, name string, last string, n *int32, orderby string) (result AcrManifests, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetAcrManifests")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAcrManifestsPreparer(ctx, name, last, n, orderby)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrManifests", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAcrManifestsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrManifests", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAcrManifestsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrManifests", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAcrManifestsPreparer prepares the GetAcrManifests request.
+func (client BaseClient) GetAcrManifestsPreparer(ctx context.Context, name string, last string, n *int32, orderby string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ }
+
+ queryParameters := map[string]interface{}{}
+ if len(last) > 0 {
+ queryParameters["last"] = autorest.Encode("query", last)
+ }
+ if n != nil {
+ queryParameters["n"] = autorest.Encode("query", *n)
+ }
+ if len(orderby) > 0 {
+ queryParameters["orderby"] = autorest.Encode("query", orderby)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/acr/v1/{name}/_manifests", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAcrManifestsSender sends the GetAcrManifests request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetAcrManifestsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAcrManifestsResponder handles the response to the GetAcrManifests request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetAcrManifestsResponder(resp *http.Response) (result AcrManifests, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAcrRefreshTokenFromExchange exchange AAD tokens for an ACR refresh Token
+// Parameters:
+// grantType - can take a value of access_token_refresh_token, or access_token, or refresh_token
+// service - indicates the name of your Azure container registry.
+// tenant - AAD tenant associated to the AAD credentials.
+// refreshToken - AAD refresh token, mandatory when grant_type is access_token_refresh_token or refresh_token
+// accessToken - AAD access token, mandatory when grant_type is access_token_refresh_token or access_token.
+func (client BaseClient) GetAcrRefreshTokenFromExchange(ctx context.Context, grantType string, service string, tenant string, refreshToken string, accessToken string) (result RefreshToken, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetAcrRefreshTokenFromExchange")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAcrRefreshTokenFromExchangePreparer(ctx, grantType, service, tenant, refreshToken, accessToken)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrRefreshTokenFromExchange", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAcrRefreshTokenFromExchangeSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrRefreshTokenFromExchange", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAcrRefreshTokenFromExchangeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrRefreshTokenFromExchange", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAcrRefreshTokenFromExchangePreparer prepares the GetAcrRefreshTokenFromExchange request.
+func (client BaseClient) GetAcrRefreshTokenFromExchangePreparer(ctx context.Context, grantType string, service string, tenant string, refreshToken string, accessToken string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ formDataParameters := map[string]interface{}{
+ "grant_type": grantType,
+ "service": service,
+ }
+ if len(tenant) > 0 {
+ formDataParameters["tenant"] = tenant
+ }
+ if len(refreshToken) > 0 {
+ formDataParameters["refresh_token"] = refreshToken
+ }
+ if len(accessToken) > 0 {
+ formDataParameters["access_token"] = accessToken
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPath("/oauth2/exchange"),
+ autorest.WithFormData(autorest.MapToValues(formDataParameters)))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAcrRefreshTokenFromExchangeSender sends the GetAcrRefreshTokenFromExchange request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetAcrRefreshTokenFromExchangeSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAcrRefreshTokenFromExchangeResponder handles the response to the GetAcrRefreshTokenFromExchange request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetAcrRefreshTokenFromExchangeResponder(resp *http.Response) (result RefreshToken, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
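The two OAuth endpoints chain together: /oauth2/exchange converts an AAD token into an ACR refresh token, and /oauth2/token converts that refresh token into an ACR access token for a given scope. A hedged sketch with placeholder token, tenant, and scope strings; the field that carries the token string on RefreshToken and AccessToken is defined in this package's models, not in this file.

// tokenFlow sketches the exchange flow; client is a containerregistry.BaseClient.
func tokenFlow(ctx context.Context, client containerregistry.BaseClient) error {
	service := "myregistry.azurecr.io" // placeholder registry login server
	// Step 1: exchange an AAD access token for an ACR refresh token
	// (grant_type "access_token", so the AAD refresh token is omitted).
	rt, err := client.GetAcrRefreshTokenFromExchange(ctx, "access_token", service,
		"<aad-tenant-id>", "", "<aad-access-token>")
	if err != nil {
		return err
	}
	_ = rt // read the refresh token string via the RefreshToken model's field

	// Step 2: exchange the ACR refresh token for an ACR access token; the scope
	// string is taken from the Www-Authenticate challenge (placeholder here).
	_, err = client.GetAcrAccessToken(ctx, service, "repository:samples/hello-world:pull",
		"<acr-refresh-token>")
	return err
}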
+
+// GetAcrRepositories list repositories
+// Parameters:
+// last - query parameter for the last item in the previous query. Result set will include values lexically after
+// last.
+// n - query parameter for max number of items
+func (client BaseClient) GetAcrRepositories(ctx context.Context, last string, n *int32) (result Repositories, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetAcrRepositories")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAcrRepositoriesPreparer(ctx, last, n)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrRepositories", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAcrRepositoriesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrRepositories", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAcrRepositoriesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrRepositories", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAcrRepositoriesPreparer prepares the GetAcrRepositories request.
+func (client BaseClient) GetAcrRepositoriesPreparer(ctx context.Context, last string, n *int32) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ queryParameters := map[string]interface{}{}
+ if len(last) > 0 {
+ queryParameters["last"] = autorest.Encode("query", last)
+ }
+ if n != nil {
+ queryParameters["n"] = autorest.Encode("query", *n)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPath("/acr/v1/_catalog"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAcrRepositoriesSender sends the GetAcrRepositories request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetAcrRepositoriesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAcrRepositoriesResponder handles the response to the GetAcrRepositories request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetAcrRepositoriesResponder(resp *http.Response) (result Repositories, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAcrRepositoryAttributes get repository attributes
+// Parameters:
+// name - name of the image (including the namespace)
+func (client BaseClient) GetAcrRepositoryAttributes(ctx context.Context, name string) (result RepositoryAttributes, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetAcrRepositoryAttributes")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAcrRepositoryAttributesPreparer(ctx, name)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrRepositoryAttributes", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAcrRepositoryAttributesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrRepositoryAttributes", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAcrRepositoryAttributesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrRepositoryAttributes", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAcrRepositoryAttributesPreparer prepares the GetAcrRepositoryAttributes request.
+func (client BaseClient) GetAcrRepositoryAttributesPreparer(ctx context.Context, name string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/acr/v1/{name}", pathParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAcrRepositoryAttributesSender sends the GetAcrRepositoryAttributes request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetAcrRepositoryAttributesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAcrRepositoryAttributesResponder handles the response to the GetAcrRepositoryAttributes request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetAcrRepositoryAttributesResponder(resp *http.Response) (result RepositoryAttributes, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAcrTagAttributes get tag attributes by tag
+// Parameters:
+// name - name of the image (including the namespace)
+// reference - tag or digest of the target manifest
+func (client BaseClient) GetAcrTagAttributes(ctx context.Context, name string, reference string) (result AcrTagAttributes, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetAcrTagAttributes")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAcrTagAttributesPreparer(ctx, name, reference)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrTagAttributes", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAcrTagAttributesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrTagAttributes", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAcrTagAttributesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrTagAttributes", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAcrTagAttributesPreparer prepares the GetAcrTagAttributes request.
+func (client BaseClient) GetAcrTagAttributesPreparer(ctx context.Context, name string, reference string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ "reference": autorest.Encode("path", reference),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/acr/v1/{name}/_tags/{reference}", pathParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAcrTagAttributesSender sends the GetAcrTagAttributes request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetAcrTagAttributesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAcrTagAttributesResponder handles the response to the GetAcrTagAttributes request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetAcrTagAttributesResponder(resp *http.Response) (result AcrTagAttributes, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAcrTags list tags of a repository
+// Parameters:
+// name - name of the image (including the namespace)
+// last - query parameter for the last item in the previous query. Result set will include values lexically after
+// last.
+// n - query parameter for max number of items
+// orderby - orderby query parameter
+// digest - filter by digest
+func (client BaseClient) GetAcrTags(ctx context.Context, name string, last string, n *int32, orderby string, digest string) (result AcrRepositoryTags, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetAcrTags")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAcrTagsPreparer(ctx, name, last, n, orderby, digest)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrTags", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAcrTagsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrTags", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAcrTagsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetAcrTags", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAcrTagsPreparer prepares the GetAcrTags request.
+func (client BaseClient) GetAcrTagsPreparer(ctx context.Context, name string, last string, n *int32, orderby string, digest string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ }
+
+ queryParameters := map[string]interface{}{}
+ if len(last) > 0 {
+ queryParameters["last"] = autorest.Encode("query", last)
+ }
+ if n != nil {
+ queryParameters["n"] = autorest.Encode("query", *n)
+ }
+ if len(orderby) > 0 {
+ queryParameters["orderby"] = autorest.Encode("query", orderby)
+ }
+ if len(digest) > 0 {
+ queryParameters["digest"] = autorest.Encode("query", digest)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/acr/v1/{name}/_tags", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAcrTagsSender sends the GetAcrTags request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetAcrTagsSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAcrTagsResponder handles the response to the GetAcrTags request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetAcrTagsResponder(resp *http.Response) (result AcrRepositoryTags, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
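A sketch of listing tags with the optional paging parameters described above and then reading the attributes of a single tag; the repository and tag names are placeholders, and the shapes of AcrRepositoryTags and AcrTagAttributes come from this package's models.

// listTags sketches tag enumeration; client is an authenticated BaseClient.
func listTags(ctx context.Context, client containerregistry.BaseClient) error {
	pageSize := int32(25)
	// Empty strings and nil values are simply omitted from the query, so only
	// `n` is sent here; pass the last tag of the previous page in `last` to page.
	tags, err := client.GetAcrTags(ctx, "samples/hello-world", "", &pageSize, "", "")
	if err != nil {
		return err
	}
	_ = tags // tag names and metadata are exposed by the AcrRepositoryTags model

	// Attributes for one tag, addressed by tag name.
	_, err = client.GetAcrTagAttributes(ctx, "samples/hello-world", "v1")
	return err
}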
+
+// GetDockerRegistryV2Support tells whether this Docker Registry instance supports Docker Registry HTTP API v2
+func (client BaseClient) GetDockerRegistryV2Support(ctx context.Context) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDockerRegistryV2Support")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetDockerRegistryV2SupportPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetDockerRegistryV2Support", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetDockerRegistryV2SupportSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetDockerRegistryV2Support", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetDockerRegistryV2SupportResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetDockerRegistryV2Support", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetDockerRegistryV2SupportPreparer prepares the GetDockerRegistryV2Support request.
+func (client BaseClient) GetDockerRegistryV2SupportPreparer(ctx context.Context) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPath("/v2/"))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetDockerRegistryV2SupportSender sends the GetDockerRegistryV2Support request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetDockerRegistryV2SupportSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetDockerRegistryV2SupportResponder handles the response to the GetDockerRegistryV2Support request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetDockerRegistryV2SupportResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
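A short sketch of probing the base endpoint above; note the generated responder treats any status other than 200 (including a 401 challenge on an unauthenticated call) as an error.

package registrysample

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry"
)

// supportsV2 reports whether GET /v2/ answered with 200 OK.
func supportsV2(ctx context.Context, client containerregistry.BaseClient) (bool, error) {
	resp, err := client.GetDockerRegistryV2Support(ctx)
	if err != nil {
		return false, err
	}
	return resp.Response != nil && resp.Response.StatusCode == http.StatusOK, nil
}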
+
+// GetManifest pulls the image manifest file associated with the specified name and reference. Reference may be a tag
+// or a digest
+// Parameters:
+// name - name of the image (including the namespace)
+// reference - a tag or a digest, pointing to a specific image
+// accept - accept header string delimited by comma. For example,
+// application/vnd.docker.distribution.manifest.v2+json
+func (client BaseClient) GetManifest(ctx context.Context, name string, reference string, accept string) (result Manifest, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetManifest")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetManifestPreparer(ctx, name, reference, accept)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetManifest", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetManifestSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetManifest", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetManifestResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetManifest", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetManifestPreparer prepares the GetManifest request.
+func (client BaseClient) GetManifestPreparer(ctx context.Context, name string, reference string, accept string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ "reference": autorest.Encode("path", reference),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/v2/{name}/manifests/{reference}", pathParameters))
+ if len(accept) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("accept", autorest.String(accept)))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetManifestSender sends the GetManifest request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetManifestSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetManifestResponder handles the response to the GetManifest request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetManifestResponder(resp *http.Response) (result Manifest, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
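A sketch of fetching a manifest by tag or digest with the accept header set, assuming an authorized BaseClient.

package registrysample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry"
)

// printConfigDigest requests a schema 2 manifest; without the accept header
// the registry may fall back to a schema 1 (v1 compatibility) manifest.
func printConfigDigest(ctx context.Context, client containerregistry.BaseClient, image, reference string) error {
	m, err := client.GetManifest(ctx, image, reference, "application/vnd.docker.distribution.manifest.v2+json")
	if err != nil {
		return err
	}
	if m.Config != nil && m.Config.Digest != nil {
		fmt.Println(*m.Config.Digest)
	}
	return nil
}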
+
+// GetRepositories lists repositories
+// Parameters:
+// last - query parameter for the last item in previous query. Result set will include values lexically after
+// last.
+// n - query parameter for max number of items
+func (client BaseClient) GetRepositories(ctx context.Context, last string, n *int32) (result Repositories, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetRepositories")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetRepositoriesPreparer(ctx, last, n)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetRepositories", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetRepositoriesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetRepositories", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetRepositoriesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetRepositories", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetRepositoriesPreparer prepares the GetRepositories request.
+func (client BaseClient) GetRepositoriesPreparer(ctx context.Context, last string, n *int32) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ queryParameters := map[string]interface{}{}
+ if len(last) > 0 {
+ queryParameters["last"] = autorest.Encode("query", last)
+ }
+ if n != nil {
+ queryParameters["n"] = autorest.Encode("query", *n)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPath("/v2/_catalog"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetRepositoriesSender sends the GetRepositories request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetRepositoriesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetRepositoriesResponder handles the response to the GetRepositories request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetRepositoriesResponder(resp *http.Response) (result Repositories, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetTagList fetches the tags under the repository identified by name
+// Parameters:
+// name - name of the image (including the namespace)
+func (client BaseClient) GetTagList(ctx context.Context, name string) (result RepositoryTags, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetTagList")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetTagListPreparer(ctx, name)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetTagList", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetTagListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetTagList", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetTagListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "GetTagList", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetTagListPreparer prepares the GetTagList request.
+func (client BaseClient) GetTagListPreparer(ctx context.Context, name string) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/v2/{name}/tags/list", pathParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetTagListSender sends the GetTagList request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) GetTagListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetTagListResponder handles the response to the GetTagList request. The method always
+// closes the http.Response Body.
+func (client BaseClient) GetTagListResponder(resp *http.Response) (result RepositoryTags, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
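GetRepositories and GetTagList are the plain Docker Registry v2 endpoints (/v2/_catalog and /v2/{name}/tags/list), as opposed to the richer /acr/v1 metadata calls; a small sketch combining them, assuming an authorized BaseClient.

package registrysample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry"
)

// printCatalog prints each repository name with its tag count.
func printCatalog(ctx context.Context, client containerregistry.BaseClient) error {
	repos, err := client.GetRepositories(ctx, "", nil)
	if err != nil || repos.Names == nil {
		return err
	}
	for _, name := range *repos.Names {
		tags, err := client.GetTagList(ctx, name)
		if err != nil {
			return err
		}
		count := 0
		if tags.Tags != nil {
			count = len(*tags.Tags)
		}
		fmt.Printf("%s: %d tags\n", name, count)
	}
	return nil
}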
+
+// UpdateAcrManifestAttributes updates the attributes of a manifest
+// Parameters:
+// name - name of the image (including the namespace)
+// reference - a tag or a digest, pointing to a specific image
+// value - repository attribute value
+func (client BaseClient) UpdateAcrManifestAttributes(ctx context.Context, name string, reference string, value *ChangeableAttributes) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateAcrManifestAttributes")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdateAcrManifestAttributesPreparer(ctx, name, reference, value)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "UpdateAcrManifestAttributes", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateAcrManifestAttributesSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "UpdateAcrManifestAttributes", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateAcrManifestAttributesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "UpdateAcrManifestAttributes", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdateAcrManifestAttributesPreparer prepares the UpdateAcrManifestAttributes request.
+func (client BaseClient) UpdateAcrManifestAttributesPreparer(ctx context.Context, name string, reference string, value *ChangeableAttributes) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ "reference": autorest.Encode("path", reference),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/acr/v1/{name}/_manifests/{reference}", pathParameters))
+ if value != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(value))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateAcrManifestAttributesSender sends the UpdateAcrManifestAttributes request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) UpdateAcrManifestAttributesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateAcrManifestAttributesResponder handles the response to the UpdateAcrManifestAttributes request. The method always
+// closes the http.Response Body.
+func (client BaseClient) UpdateAcrManifestAttributesResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// UpdateAcrRepositoryAttributes updates the changeable attributes of the repository identified by `name`.
+// Parameters:
+// name - name of the image (including the namespace)
+// value - repository attribute value
+func (client BaseClient) UpdateAcrRepositoryAttributes(ctx context.Context, name string, value *ChangeableAttributes) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateAcrRepositoryAttributes")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdateAcrRepositoryAttributesPreparer(ctx, name, value)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "UpdateAcrRepositoryAttributes", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateAcrRepositoryAttributesSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "UpdateAcrRepositoryAttributes", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateAcrRepositoryAttributesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "UpdateAcrRepositoryAttributes", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdateAcrRepositoryAttributesPreparer prepares the UpdateAcrRepositoryAttributes request.
+func (client BaseClient) UpdateAcrRepositoryAttributesPreparer(ctx context.Context, name string, value *ChangeableAttributes) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/acr/v1/{name}", pathParameters))
+ if value != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(value))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateAcrRepositoryAttributesSender sends the UpdateAcrRepositoryAttributes request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) UpdateAcrRepositoryAttributesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateAcrRepositoryAttributesResponder handles the response to the UpdateAcrRepositoryAttributes request. The method always
+// closes the http.Response Body.
+func (client BaseClient) UpdateAcrRepositoryAttributesResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// UpdateAcrTagAttributes updates tag attributes
+// Parameters:
+// name - name of the image (including the namespace)
+// reference - tag or digest of the target manifest
+// value - repository attribute value
+func (client BaseClient) UpdateAcrTagAttributes(ctx context.Context, name string, reference string, value *ChangeableAttributes) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateAcrTagAttributes")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdateAcrTagAttributesPreparer(ctx, name, reference, value)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "UpdateAcrTagAttributes", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateAcrTagAttributesSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "UpdateAcrTagAttributes", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateAcrTagAttributesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "containerregistry.BaseClient", "UpdateAcrTagAttributes", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdateAcrTagAttributesPreparer prepares the UpdateAcrTagAttributes request.
+func (client BaseClient) UpdateAcrTagAttributesPreparer(ctx context.Context, name string, reference string, value *ChangeableAttributes) (*http.Request, error) {
+ urlParameters := map[string]interface{}{
+ "url": client.LoginURI,
+ }
+
+ pathParameters := map[string]interface{}{
+ "name": autorest.Encode("path", name),
+ "reference": autorest.Encode("path", reference),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithCustomBaseURL("{url}", urlParameters),
+ autorest.WithPathParameters("/acr/v1/{name}/_tags/{reference}", pathParameters))
+ if value != nil {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithJSON(value))
+ }
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateAcrTagAttributesSender sends the UpdateAcrTagAttributes request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) UpdateAcrTagAttributesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateAcrTagAttributesResponder handles the response to the UpdateAcrTagAttributes request. The method always
+// closes the http.Response Body.
+func (client BaseClient) UpdateAcrTagAttributesResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
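The three Update*Attributes methods share the same PATCH pattern: only the ChangeableAttributes fields that are set get serialized, so partial updates are possible. A sketch that locks a tag against pushes and deletes:

package registrysample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry"
)

// lockTag clears the write and delete flags on one tag; the unset fields
// (ListEnabled, ReadEnabled) are omitted from the request body.
func lockTag(ctx context.Context, client containerregistry.BaseClient, repo, tag string) error {
	f := false
	_, err := client.UpdateAcrTagAttributes(ctx, repo, tag, &containerregistry.ChangeableAttributes{
		WriteEnabled:  &f,
		DeleteEnabled: &f,
	})
	return err
}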
diff --git a/services/preview/containerregistry/runtime/2019-07/containerregistry/containerregistryapi/interfaces.go b/services/preview/containerregistry/runtime/2019-07/containerregistry/containerregistryapi/interfaces.go
new file mode 100644
index 000000000000..20b5058ad367
--- /dev/null
+++ b/services/preview/containerregistry/runtime/2019-07/containerregistry/containerregistryapi/interfaces.go
@@ -0,0 +1,50 @@
+package containerregistryapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry"
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// BaseClientAPI contains the set of methods on the BaseClient type.
+type BaseClientAPI interface {
+ CreateManifest(ctx context.Context, name string, reference string, payload containerregistry.Manifest) (result containerregistry.SetObject, err error)
+ DeleteAcrRepository(ctx context.Context, name string) (result containerregistry.DeletedRepository, err error)
+ DeleteAcrTag(ctx context.Context, name string, reference string) (result autorest.Response, err error)
+ DeleteManifest(ctx context.Context, name string, reference string) (result autorest.Response, err error)
+ GetAcrAccessToken(ctx context.Context, service string, scope string, refreshToken string) (result containerregistry.AccessToken, err error)
+ GetAcrAccessTokenFromLogin(ctx context.Context, service string, scope string) (result containerregistry.AccessToken, err error)
+ GetAcrManifestAttributes(ctx context.Context, name string, reference string) (result containerregistry.AcrManifestAttributes, err error)
+ GetAcrManifests(ctx context.Context, name string, last string, n *int32, orderby string) (result containerregistry.AcrManifests, err error)
+ GetAcrRefreshTokenFromExchange(ctx context.Context, grantType string, service string, tenant string, refreshToken string, accessToken string) (result containerregistry.RefreshToken, err error)
+ GetAcrRepositories(ctx context.Context, last string, n *int32) (result containerregistry.Repositories, err error)
+ GetAcrRepositoryAttributes(ctx context.Context, name string) (result containerregistry.RepositoryAttributes, err error)
+ GetAcrTagAttributes(ctx context.Context, name string, reference string) (result containerregistry.AcrTagAttributes, err error)
+ GetAcrTags(ctx context.Context, name string, last string, n *int32, orderby string, digest string) (result containerregistry.AcrRepositoryTags, err error)
+ GetDockerRegistryV2Support(ctx context.Context) (result autorest.Response, err error)
+ GetManifest(ctx context.Context, name string, reference string, accept string) (result containerregistry.Manifest, err error)
+ GetRepositories(ctx context.Context, last string, n *int32) (result containerregistry.Repositories, err error)
+ GetTagList(ctx context.Context, name string) (result containerregistry.RepositoryTags, err error)
+ UpdateAcrManifestAttributes(ctx context.Context, name string, reference string, value *containerregistry.ChangeableAttributes) (result autorest.Response, err error)
+ UpdateAcrRepositoryAttributes(ctx context.Context, name string, value *containerregistry.ChangeableAttributes) (result autorest.Response, err error)
+ UpdateAcrTagAttributes(ctx context.Context, name string, reference string, value *containerregistry.ChangeableAttributes) (result autorest.Response, err error)
+}
+
+var _ BaseClientAPI = (*containerregistry.BaseClient)(nil)
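Because the interface mirrors every BaseClient method, callers can depend on BaseClientAPI and swap in a fake for unit tests; a sketch with a hypothetical fakeRegistry type that implements only what the test exercises.

package registrysample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry"
	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry/containerregistryapi"
)

// fakeRegistry embeds the interface so unimplemented methods still satisfy it
// (calling one of them would panic, which is acceptable in tests).
type fakeRegistry struct {
	containerregistryapi.BaseClientAPI
	repos []string
}

func (f fakeRegistry) GetRepositories(ctx context.Context, last string, n *int32) (containerregistry.Repositories, error) {
	return containerregistry.Repositories{Names: &f.repos}, nil
}

// countRepositories accepts the interface, so production code passes the real
// BaseClient while tests pass fakeRegistry.
func countRepositories(ctx context.Context, c containerregistryapi.BaseClientAPI) (int, error) {
	repos, err := c.GetRepositories(ctx, "", nil)
	if err != nil || repos.Names == nil {
		return 0, err
	}
	return len(*repos.Names), nil
}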
diff --git a/services/preview/containerregistry/runtime/2019-07/containerregistry/models.go b/services/preview/containerregistry/runtime/2019-07/containerregistry/models.go
new file mode 100644
index 000000000000..cf2e0897ca5f
--- /dev/null
+++ b/services/preview/containerregistry/runtime/2019-07/containerregistry/models.go
@@ -0,0 +1,329 @@
+package containerregistry
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry"
+
+// AccessToken ...
+type AccessToken struct {
+ autorest.Response `json:"-"`
+ // AccessToken - The access token for performing authenticated requests
+ AccessToken *string `json:"access_token,omitempty"`
+}
+
+// AcrErrorInfo error information
+type AcrErrorInfo struct {
+ // Code - Error code
+ Code *string `json:"code,omitempty"`
+ // Message - Error message
+ Message *string `json:"message,omitempty"`
+ // Detail - Error details
+ Detail interface{} `json:"detail,omitempty"`
+}
+
+// AcrErrors acr error response describing why the operation failed
+type AcrErrors struct {
+ // Errors - Array of detailed error
+ Errors *[]AcrErrorInfo `json:"errors,omitempty"`
+}
+
+// AcrManifestAttributes manifest attributes details
+type AcrManifestAttributes struct {
+ autorest.Response `json:"-"`
+ // Registry - Registry name
+ Registry *string `json:"registry,omitempty"`
+ // ImageName - Image name
+ ImageName *string `json:"imageName,omitempty"`
+ // ManifestAttributes - Manifest attributes
+ ManifestAttributes *AcrManifestAttributesBase `json:"manifest,omitempty"`
+}
+
+// AcrManifestAttributesBase manifest details
+type AcrManifestAttributesBase struct {
+ // Digest - Manifest
+ Digest *string `json:"digest,omitempty"`
+ // ImageSize - Image size
+ ImageSize *int32 `json:"imageSize,omitempty"`
+ // CreatedTime - Created time
+ CreatedTime *string `json:"createdTime,omitempty"`
+ // LastUpdateTime - Last update time
+ LastUpdateTime *string `json:"lastUpdateTime,omitempty"`
+ // Architecture - CPU architecture
+ Architecture *string `json:"architecture,omitempty"`
+ // Os - Operating system
+ Os *string `json:"os,omitempty"`
+ // MediaType - Media type
+ MediaType *string `json:"mediaType,omitempty"`
+ // ConfigMediaType - Config blob media type
+ ConfigMediaType *string `json:"configMediaType,omitempty"`
+ // Tags - List of tags
+ Tags *[]string `json:"tags,omitempty"`
+ // ChangeableAttributes - Changeable attributes
+ ChangeableAttributes *ChangeableAttributes `json:"changeableAttributes,omitempty"`
+}
+
+// AcrManifests manifest attributes
+type AcrManifests struct {
+ autorest.Response `json:"-"`
+ // Registry - Registry name
+ Registry *string `json:"registry,omitempty"`
+ // ImageName - Image name
+ ImageName *string `json:"imageName,omitempty"`
+ // ManifestsAttributes - List of manifests
+ ManifestsAttributes *[]AcrManifestAttributesBase `json:"manifests,omitempty"`
+}
+
+// AcrRepositoryTags list of tag details
+type AcrRepositoryTags struct {
+ autorest.Response `json:"-"`
+ // Registry - Registry name
+ Registry *string `json:"registry,omitempty"`
+ // ImageName - Image name
+ ImageName *string `json:"imageName,omitempty"`
+ // TagsAttributes - List of tag attribute details
+ TagsAttributes *[]AcrTagAttributesBase `json:"tags,omitempty"`
+}
+
+// AcrTagAttributes tag attributes
+type AcrTagAttributes struct {
+ autorest.Response `json:"-"`
+ // Registry - Registry name
+ Registry *string `json:"registry,omitempty"`
+ // ImageName - Image name
+ ImageName *string `json:"imageName,omitempty"`
+ // TagAttributes - List of tag attribute details
+ TagAttributes *AcrTagAttributesBase `json:"tag,omitempty"`
+}
+
+// AcrTagAttributesBase tag attribute details
+type AcrTagAttributesBase struct {
+ // Name - Tag name
+ Name *string `json:"name,omitempty"`
+ // Digest - Tag digest
+ Digest *string `json:"digest,omitempty"`
+ // CreatedTime - Tag created time
+ CreatedTime *string `json:"createdTime,omitempty"`
+ // LastUpdateTime - Tag last update time
+ LastUpdateTime *string `json:"lastUpdateTime,omitempty"`
+ // Signed - Is signed
+ Signed *bool `json:"signed,omitempty"`
+ // ChangeableAttributes - Changeable attributes
+ ChangeableAttributes *ChangeableAttributes `json:"changeableAttributes,omitempty"`
+}
+
+// ChangeableAttributes ...
+type ChangeableAttributes struct {
+ // DeleteEnabled - Delete enabled
+ DeleteEnabled *bool `json:"deleteEnabled,omitempty"`
+ // WriteEnabled - Write enabled
+ WriteEnabled *bool `json:"writeEnabled,omitempty"`
+ // ListEnabled - List enabled
+ ListEnabled *bool `json:"listEnabled,omitempty"`
+ // ReadEnabled - Read enabled
+ ReadEnabled *bool `json:"readEnabled,omitempty"`
+}
+
+// DeletedRepository deleted repository
+type DeletedRepository struct {
+ autorest.Response `json:"-"`
+ // ManifestsDeleted - SHA of the deleted image
+ ManifestsDeleted *[]string `json:"manifestsDeleted,omitempty"`
+ // TagsDeleted - Tag of the deleted image
+ TagsDeleted *[]string `json:"tagsDeleted,omitempty"`
+}
+
+// FsLayer image layer information
+type FsLayer struct {
+ // BlobSum - SHA of an image layer
+ BlobSum *string `json:"blobSum,omitempty"`
+}
+
+// History a list of unstructured historical data for v1 compatibility
+type History struct {
+ // V1Compatibility - The raw v1 compatibility information
+ V1Compatibility *string `json:"v1Compatibility,omitempty"`
+}
+
+// ImageSignature signature of a signed manifest
+type ImageSignature struct {
+ // Header - A JSON web signature
+ Header *JWK `json:"header,omitempty"`
+ // Signature - A signature for the image manifest, signed by a libtrust private key
+ Signature *string `json:"signature,omitempty"`
+ // Protected - The signed protected header
+ Protected *string `json:"protected,omitempty"`
+}
+
+// JWK a JSON web signature
+type JWK struct {
+ Jwk *JWKHeader `json:"jwk,omitempty"`
+ // Alg - The algorithm used to sign or encrypt the JWT
+ Alg *string `json:"alg,omitempty"`
+}
+
+// JWKHeader JSON web key parameter
+type JWKHeader struct {
+ // Crv - crv value
+ Crv *string `json:"crv,omitempty"`
+ // Kid - kid value
+ Kid *string `json:"kid,omitempty"`
+ // Kty - kty value
+ Kty *string `json:"kty,omitempty"`
+ // X - x value
+ X *string `json:"x,omitempty"`
+ // Y - y value
+ Y *string `json:"y,omitempty"`
+}
+
+// Manifest returns the requested manifest file
+type Manifest struct {
+ autorest.Response `json:"-"`
+ // SchemaVersion - Schema version
+ SchemaVersion *int32 `json:"schemaVersion,omitempty"`
+ // MediaType - Media type usually application/vnd.docker.distribution.manifest.v2+json if this is in the accept header
+ MediaType *string `json:"mediaType,omitempty"`
+ // Config - V2 image config descriptor
+ Config *V2Descriptor `json:"config,omitempty"`
+ // Layers - List of V2 image layer information
+ Layers *[]V2Descriptor `json:"layers,omitempty"`
+ // Architecture - CPU architecture
+ Architecture *string `json:"architecture,omitempty"`
+ // Name - Image name
+ Name *string `json:"name,omitempty"`
+ // Tag - Image tag
+ Tag *string `json:"tag,omitempty"`
+ // FsLayers - List of layer information
+ FsLayers *[]FsLayer `json:"fsLayers,omitempty"`
+ // History - Image history
+ History *[]History `json:"history,omitempty"`
+ // Signatures - Image signature
+ Signatures *[]ImageSignature `json:"signatures,omitempty"`
+}
+
+// ManifestAttributesManifest list of manifest attributes
+type ManifestAttributesManifest struct {
+ // References - List of manifest attributes details
+ References *[]ManifestAttributesManifestReferences `json:"references,omitempty"`
+ // QuarantineTag - Quarantine tag name
+ QuarantineTag *string `json:"quarantineTag,omitempty"`
+}
+
+// ManifestAttributesManifestReferences manifest attributes details
+type ManifestAttributesManifestReferences struct {
+ // Digest - Manifest digest
+ Digest *string `json:"digest,omitempty"`
+ // Architecture - CPU architecture
+ Architecture *string `json:"architecture,omitempty"`
+ // Os - Operating system
+ Os *string `json:"os,omitempty"`
+}
+
+// ManifestChangeableAttributes changeable attributes
+type ManifestChangeableAttributes struct {
+ // DeleteEnabled - Delete enabled
+ DeleteEnabled *bool `json:"deleteEnabled,omitempty"`
+ // WriteEnabled - Write enabled
+ WriteEnabled *bool `json:"writeEnabled,omitempty"`
+ // ListEnabled - List enabled
+ ListEnabled *bool `json:"listEnabled,omitempty"`
+ // ReadEnabled - Read enabled
+ ReadEnabled *bool `json:"readEnabled,omitempty"`
+ // QuarantineState - Quarantine state
+ QuarantineState *string `json:"quarantineState,omitempty"`
+ // QuarantineDetails - Quarantine details
+ QuarantineDetails *string `json:"quarantineDetails,omitempty"`
+}
+
+// RefreshToken ...
+type RefreshToken struct {
+ autorest.Response `json:"-"`
+ // RefreshToken - The refresh token to be used for generating access tokens
+ RefreshToken *string `json:"refresh_token,omitempty"`
+}
+
+// Repositories list of repositories
+type Repositories struct {
+ autorest.Response `json:"-"`
+ // Names - Repository names
+ Names *[]string `json:"repositories,omitempty"`
+}
+
+// RepositoryAttributes repository attributes
+type RepositoryAttributes struct {
+ autorest.Response `json:"-"`
+ // Registry - Registry name
+ Registry *string `json:"registry,omitempty"`
+ // ImageName - Image name
+ ImageName *string `json:"imageName,omitempty"`
+ // CreatedTime - Image created time
+ CreatedTime *string `json:"createdTime,omitempty"`
+ // LastUpdateTime - Image last update time
+ LastUpdateTime *string `json:"lastUpdateTime,omitempty"`
+ // ManifestCount - Number of the manifests
+ ManifestCount *int32 `json:"manifestCount,omitempty"`
+ // TagCount - Number of the tags
+ TagCount *int32 `json:"tagCount,omitempty"`
+ // ChangeableAttributes - Changeable attributes
+ ChangeableAttributes *ChangeableAttributes `json:"changeableAttributes,omitempty"`
+}
+
+// RepositoryTags result of the request to list tags of the image
+type RepositoryTags struct {
+ autorest.Response `json:"-"`
+ // Name - Name of the image
+ Name *string `json:"name,omitempty"`
+ // Tags - List of tags
+ Tags *[]string `json:"tags,omitempty"`
+}
+
+// SetObject ...
+type SetObject struct {
+ autorest.Response `json:"-"`
+ Value interface{} `json:"value,omitempty"`
+}
+
+// TagAttributes tag attributes
+type TagAttributes struct {
+ // Registry - Registry name
+ Registry *string `json:"registry,omitempty"`
+ // ImageName - Image name
+ ImageName *string `json:"imageName,omitempty"`
+ // Tag - Tag attributes
+ Tag *TagAttributesTag `json:"tag,omitempty"`
+}
+
+// TagAttributesTag tag
+type TagAttributesTag struct {
+ // SignatureRecord - SignatureRecord value
+ SignatureRecord *string `json:"signatureRecord,omitempty"`
+}
+
+// V2Descriptor docker V2 image layer descriptor including config and layers
+type V2Descriptor struct {
+ // MediaType - Layer media type
+ MediaType *string `json:"mediaType,omitempty"`
+ // Size - Layer size
+ Size *int32 `json:"size,omitempty"`
+ // Digest - Layer digest
+ Digest *string `json:"digest,omitempty"`
+}
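All model fields are pointers so that properties absent from the JSON stay distinguishable from zero values; a small illustrative sketch of reading them defensively (describeRepository is not part of the package).

package registrysample

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry"
)

// describeRepository formats a RepositoryAttributes value without panicking
// on nil fields.
func describeRepository(attrs containerregistry.RepositoryAttributes) string {
	str := func(s *string) string {
		if s == nil {
			return ""
		}
		return *s
	}
	num := func(n *int32) int32 {
		if n == nil {
			return 0
		}
		return *n
	}
	return fmt.Sprintf("%s/%s: %d manifests, %d tags",
		str(attrs.Registry), str(attrs.ImageName),
		num(attrs.ManifestCount), num(attrs.TagCount))
}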
diff --git a/services/preview/containerregistry/runtime/2019-07/containerregistry/version.go b/services/preview/containerregistry/runtime/2019-07/containerregistry/version.go
new file mode 100644
index 000000000000..4fc1972256f7
--- /dev/null
+++ b/services/preview/containerregistry/runtime/2019-07/containerregistry/version.go
@@ -0,0 +1,30 @@
+package containerregistry
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " containerregistry/2019-07-15-preview"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
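UserAgent pins both the SDK build and the 2019-07-15-preview API version. Assuming BaseClient embeds autorest.Client as in the other generated packages, applications can append their own identifier for telemetry; a small sketch (appID is illustrative):

package registrysample

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-07/containerregistry"
)

// tagTelemetry logs the SDK identifiers and extends the user agent with an
// application-supplied ID.
func tagTelemetry(client *containerregistry.BaseClient, appID string) error {
	fmt.Println(containerregistry.UserAgent(), containerregistry.Version())
	return client.AddToUserAgent(appID)
}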
diff --git a/services/preview/datafactory/mgmt/2017-09-01-preview/datafactory/activityruns.go b/services/preview/datafactory/mgmt/2017-09-01-preview/datafactory/activityruns.go
index 6ccdcd449d5a..718dfd3b89aa 100644
--- a/services/preview/datafactory/mgmt/2017-09-01-preview/datafactory/activityruns.go
+++ b/services/preview/datafactory/mgmt/2017-09-01-preview/datafactory/activityruns.go
@@ -74,9 +74,11 @@ func (client ActivityRunsClient) ListByPipelineRun(ctx context.Context, resource
{Target: "factoryName", Name: validation.MinLength, Rule: 3, Chain: nil},
{Target: "factoryName", Name: validation.Pattern, Rule: `^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$`, Chain: nil}}},
{TargetValue: linkedServiceName,
- Constraints: []validation.Constraint{{Target: "linkedServiceName", Name: validation.MaxLength, Rule: 260, Chain: nil},
- {Target: "linkedServiceName", Name: validation.MinLength, Rule: 1, Chain: nil},
- {Target: "linkedServiceName", Name: validation.Pattern, Rule: `^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "linkedServiceName", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "linkedServiceName", Name: validation.MaxLength, Rule: 260, Chain: nil},
+ {Target: "linkedServiceName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "linkedServiceName", Name: validation.Pattern, Rule: `^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$`, Chain: nil},
+ }}}}}); err != nil {
return result, validation.NewError("datafactory.ActivityRunsClient", "ListByPipelineRun", err.Error())
}
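The length and pattern rules for linkedServiceName are now chained under an Empty constraint with Rule false, so they are applied only when a value is actually supplied; an empty linkedServiceName should now pass validation. A standalone sketch of the same constraint shape using the go-autorest validation package:

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/validation"
)

func validateLinkedServiceName(linkedServiceName string) error {
	return validation.Validate([]validation.Validation{
		{TargetValue: linkedServiceName,
			Constraints: []validation.Constraint{{Target: "linkedServiceName", Name: validation.Empty, Rule: false,
				Chain: []validation.Constraint{
					{Target: "linkedServiceName", Name: validation.MaxLength, Rule: 260, Chain: nil},
					{Target: "linkedServiceName", Name: validation.MinLength, Rule: 1, Chain: nil},
					{Target: "linkedServiceName", Name: validation.Pattern, Rule: `^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$`, Chain: nil},
				}}}},
	})
}

func main() {
	fmt.Println(validateLinkedServiceName(""))           // expected: <nil>, empty values skip the chained rules
	fmt.Println(validateLinkedServiceName("my_service")) // expected: <nil>, satisfies the chained rules
}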
diff --git a/services/preview/datamigration/mgmt/2017-11-15-preview/datamigration/models.go b/services/preview/datamigration/mgmt/2017-11-15-preview/datamigration/models.go
index 4b86923d28d5..00e0738b1248 100644
--- a/services/preview/datamigration/mgmt/2017-11-15-preview/datamigration/models.go
+++ b/services/preview/datamigration/mgmt/2017-11-15-preview/datamigration/models.go
@@ -507,28 +507,28 @@ const (
TaskTypeGetUserTablesSQL TaskType = "GetUserTables.Sql"
// TaskTypeMigrateSQLServerSQLDb ...
TaskTypeMigrateSQLServerSQLDb TaskType = "Migrate.SqlServer.SqlDb"
- // TaskTypeUnknown ...
- TaskTypeUnknown TaskType = "Unknown"
+ // TaskTypeProjectTaskProperties ...
+ TaskTypeProjectTaskProperties TaskType = "ProjectTaskProperties"
)
// PossibleTaskTypeValues returns an array of possible values for the TaskType const type.
func PossibleTaskTypeValues() []TaskType {
- return []TaskType{TaskTypeConnectToSourceSQLServer, TaskTypeConnectToTargetSQLDb, TaskTypeGetUserTablesSQL, TaskTypeMigrateSQLServerSQLDb, TaskTypeUnknown}
+ return []TaskType{TaskTypeConnectToSourceSQLServer, TaskTypeConnectToTargetSQLDb, TaskTypeGetUserTablesSQL, TaskTypeMigrateSQLServerSQLDb, TaskTypeProjectTaskProperties}
}
// Type enumerates the values for type.
type Type string
const (
+ // TypeConnectionInfo ...
+ TypeConnectionInfo Type = "ConnectionInfo"
// TypeSQLConnectionInfo ...
TypeSQLConnectionInfo Type = "SqlConnectionInfo"
- // TypeUnknown ...
- TypeUnknown Type = "Unknown"
)
// PossibleTypeValues returns an array of possible values for the Type const type.
func PossibleTypeValues() []Type {
- return []Type{TypeSQLConnectionInfo, TypeUnknown}
+ return []Type{TypeConnectionInfo, TypeSQLConnectionInfo}
}
// UpdateActionType enumerates the values for update action type.
@@ -627,7 +627,7 @@ type ConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -670,7 +670,7 @@ func unmarshalBasicConnectionInfoArray(body []byte) ([]BasicConnectionInfo, erro
// MarshalJSON is the custom marshaler for ConnectionInfo.
func (ci ConnectionInfo) MarshalJSON() ([]byte, error) {
- ci.Type = TypeUnknown
+ ci.Type = TypeConnectionInfo
objectMap := make(map[string]interface{})
if ci.UserName != nil {
objectMap["userName"] = ci.UserName
@@ -903,7 +903,7 @@ type ConnectToSourceSQLServerTaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -1046,7 +1046,7 @@ type ConnectToTargetSQLDbTaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -1338,7 +1338,7 @@ type GetUserTablesSQLTaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -1823,7 +1823,7 @@ type MigrateSQLServerSQLDbTaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -2500,7 +2500,7 @@ type ProjectTaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -2555,7 +2555,7 @@ func unmarshalBasicProjectTaskPropertiesArray(body []byte) ([]BasicProjectTaskPr
// MarshalJSON is the custom marshaler for ProjectTaskProperties.
func (ptp ProjectTaskProperties) MarshalJSON() ([]byte, error) {
- ptp.TaskType = TaskTypeUnknown
+ ptp.TaskType = TaskTypeProjectTaskProperties
objectMap := make(map[string]interface{})
if ptp.TaskType != "" {
objectMap["taskType"] = ptp.TaskType
@@ -3840,7 +3840,7 @@ type SQLConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
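The base-type discriminator constants were renamed: TypeUnknown and TaskTypeUnknown are gone in favor of TypeConnectionInfo and TaskTypeProjectTaskProperties, so switches on the old constants no longer compile and a bare ConnectionInfo now marshals its discriminator as "ConnectionInfo". A small sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/datamigration/mgmt/2017-11-15-preview/datamigration"
)

func main() {
	user := "sa"
	ci := datamigration.ConnectionInfo{UserName: &user}
	b, _ := json.Marshal(ci)
	fmt.Println(string(b)) // e.g. {"type":"ConnectionInfo","userName":"sa"}
}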
diff --git a/services/preview/datamigration/mgmt/2018-03-31-preview/datamigration/models.go b/services/preview/datamigration/mgmt/2018-03-31-preview/datamigration/models.go
index ecf162b5e890..121897b88103 100644
--- a/services/preview/datamigration/mgmt/2018-03-31-preview/datamigration/models.go
+++ b/services/preview/datamigration/mgmt/2018-03-31-preview/datamigration/models.go
@@ -595,30 +595,30 @@ const (
TaskTypeMigrateSQLServerAzureSQLDbMI TaskType = "Migrate.SqlServer.AzureSqlDbMI"
// TaskTypeMigrateSQLServerSQLDb ...
TaskTypeMigrateSQLServerSQLDb TaskType = "Migrate.SqlServer.SqlDb"
- // TaskTypeUnknown ...
- TaskTypeUnknown TaskType = "Unknown"
+ // TaskTypeProjectTaskProperties ...
+ TaskTypeProjectTaskProperties TaskType = "ProjectTaskProperties"
// TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI ...
TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI"
)
// PossibleTaskTypeValues returns an array of possible values for the TaskType const type.
func PossibleTaskTypeValues() []TaskType {
- return []TaskType{TaskTypeConnectToSourceSQLServer, TaskTypeConnectToTargetAzureSQLDbMI, TaskTypeConnectToTargetSQLDb, TaskTypeGetUserTablesSQL, TaskTypeMigrateSQLServerAzureSQLDbMI, TaskTypeMigrateSQLServerSQLDb, TaskTypeUnknown, TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI}
+ return []TaskType{TaskTypeConnectToSourceSQLServer, TaskTypeConnectToTargetAzureSQLDbMI, TaskTypeConnectToTargetSQLDb, TaskTypeGetUserTablesSQL, TaskTypeMigrateSQLServerAzureSQLDbMI, TaskTypeMigrateSQLServerSQLDb, TaskTypeProjectTaskProperties, TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI}
}
// Type enumerates the values for type.
type Type string
const (
+ // TypeConnectionInfo ...
+ TypeConnectionInfo Type = "ConnectionInfo"
// TypeSQLConnectionInfo ...
TypeSQLConnectionInfo Type = "SqlConnectionInfo"
- // TypeUnknown ...
- TypeUnknown Type = "Unknown"
)
// PossibleTypeValues returns an array of possible values for the Type const type.
func PossibleTypeValues() []Type {
- return []Type{TypeSQLConnectionInfo, TypeUnknown}
+ return []Type{TypeConnectionInfo, TypeSQLConnectionInfo}
}
// UpdateActionType enumerates the values for update action type.
@@ -723,7 +723,7 @@ type ConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -766,7 +766,7 @@ func unmarshalBasicConnectionInfoArray(body []byte) ([]BasicConnectionInfo, erro
// MarshalJSON is the custom marshaler for ConnectionInfo.
func (ci ConnectionInfo) MarshalJSON() ([]byte, error) {
- ci.Type = TypeUnknown
+ ci.Type = TypeConnectionInfo
objectMap := make(map[string]interface{})
if ci.UserName != nil {
objectMap["userName"] = ci.UserName
@@ -1167,7 +1167,7 @@ type ConnectToSourceSQLServerTaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -1325,7 +1325,7 @@ type ConnectToTargetSQLDbTaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -1422,7 +1422,7 @@ type ConnectToTargetSQLMITaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -1739,7 +1739,7 @@ type GetUserTablesSQLTaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -2237,7 +2237,7 @@ type MigrateSQLServerSQLDbTaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -2858,7 +2858,7 @@ type MigrateSQLServerSQLMITaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -3574,7 +3574,7 @@ type ProjectTaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -3641,7 +3641,7 @@ func unmarshalBasicProjectTaskPropertiesArray(body []byte) ([]BasicProjectTaskPr
// MarshalJSON is the custom marshaler for ProjectTaskProperties.
func (ptp ProjectTaskProperties) MarshalJSON() ([]byte, error) {
- ptp.TaskType = TaskTypeUnknown
+ ptp.TaskType = TaskTypeProjectTaskProperties
objectMap := make(map[string]interface{})
if ptp.TaskType != "" {
objectMap["taskType"] = ptp.TaskType
@@ -4941,7 +4941,7 @@ type SQLConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -5222,7 +5222,7 @@ type ValidateMigrationInputSQLServerSQLMITaskProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the task. This is ignored if submitted. Possible values include: 'TaskStateUnknown', 'TaskStateQueued', 'TaskStateRunning', 'TaskStateCanceled', 'TaskStateSucceeded', 'TaskStateFailed', 'TaskStateFailedInputValidation', 'TaskStateFaulted'
State TaskState `json:"state,omitempty"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeConnectToSourceSQLServer'
TaskType TaskType `json:"taskType,omitempty"`
}
diff --git a/services/preview/datamigration/mgmt/2018-07-15-preview/datamigration/models.go b/services/preview/datamigration/mgmt/2018-07-15-preview/datamigration/models.go
index b9e385731cff..372a4c7410d1 100644
--- a/services/preview/datamigration/mgmt/2018-07-15-preview/datamigration/models.go
+++ b/services/preview/datamigration/mgmt/2018-07-15-preview/datamigration/models.go
@@ -144,6 +144,8 @@ type CommandType string
const (
// CommandTypeCancel ...
CommandTypeCancel CommandType = "cancel"
+ // CommandTypeCommandProperties ...
+ CommandTypeCommandProperties CommandType = "CommandProperties"
// CommandTypeFinish ...
CommandTypeFinish CommandType = "finish"
// CommandTypeMigrateSQLServerAzureDbSQLMiComplete ...
@@ -152,13 +154,11 @@ const (
CommandTypeMigrateSyncCompleteDatabase CommandType = "Migrate.Sync.Complete.Database"
// CommandTypeRestart ...
CommandTypeRestart CommandType = "restart"
- // CommandTypeUnknown ...
- CommandTypeUnknown CommandType = "Unknown"
)
// PossibleCommandTypeValues returns an array of possible values for the CommandType const type.
func PossibleCommandTypeValues() []CommandType {
- return []CommandType{CommandTypeCancel, CommandTypeFinish, CommandTypeMigrateSQLServerAzureDbSQLMiComplete, CommandTypeMigrateSyncCompleteDatabase, CommandTypeRestart, CommandTypeUnknown}
+ return []CommandType{CommandTypeCancel, CommandTypeCommandProperties, CommandTypeFinish, CommandTypeMigrateSQLServerAzureDbSQLMiComplete, CommandTypeMigrateSyncCompleteDatabase, CommandTypeRestart}
}
// DatabaseCompatLevel enumerates the values for database compat level.
@@ -1255,8 +1255,8 @@ const (
TaskTypeMigrateMongoDb TaskType = "Migrate.MongoDb"
// TaskTypeMigrateMySQLAzureDbForMySQLSync ...
TaskTypeMigrateMySQLAzureDbForMySQLSync TaskType = "Migrate.MySql.AzureDbForMySql.Sync"
- // TaskTypeMigrateOracleSQLSync ...
- TaskTypeMigrateOracleSQLSync TaskType = "Migrate.Oracle.Sql.Sync"
+ // TaskTypeMigrateOracleAzureDbForPostgreSQLSync ...
+ TaskTypeMigrateOracleAzureDbForPostgreSQLSync TaskType = "Migrate.Oracle.AzureDbForPostgreSql.Sync"
// TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync ...
TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync TaskType = "Migrate.PostgreSql.AzureDbForPostgreSql.Sync"
// TaskTypeMigrateSchemaSQLServerSQLDb ...
@@ -1271,14 +1271,14 @@ const (
TaskTypeMigrateSQLServerSQLDb TaskType = "Migrate.SqlServer.SqlDb"
// TaskTypeMigrateSsis ...
TaskTypeMigrateSsis TaskType = "Migrate.Ssis"
+ // TaskTypeProjectTaskProperties ...
+ TaskTypeProjectTaskProperties TaskType = "ProjectTaskProperties"
// TaskTypeServiceCheckOCI ...
TaskTypeServiceCheckOCI TaskType = "Service.Check.OCI"
// TaskTypeServiceInstallOCI ...
TaskTypeServiceInstallOCI TaskType = "Service.Install.OCI"
// TaskTypeServiceUploadOCI ...
TaskTypeServiceUploadOCI TaskType = "Service.Upload.OCI"
- // TaskTypeUnknown ...
- TaskTypeUnknown TaskType = "Unknown"
// TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI ...
TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI"
// TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS ...
@@ -1293,13 +1293,15 @@ const (
// PossibleTaskTypeValues returns an array of possible values for the TaskType const type.
func PossibleTaskTypeValues() []TaskType {
- return []TaskType{TaskTypeConnectMongoDb, TaskTypeConnectToSourceMySQL, TaskTypeConnectToSourceOracleSync, TaskTypeConnectToSourcePostgreSQLSync, TaskTypeConnectToSourceSQLServer, TaskTypeConnectToSourceSQLServerSync, TaskTypeConnectToTargetAzureDbForMySQL, TaskTypeConnectToTargetAzureDbForPostgreSQLSync, TaskTypeConnectToTargetAzureSQLDbMI, TaskTypeConnectToTargetAzureSQLDbMISyncLRS, TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync, TaskTypeConnectToTargetSQLDb, TaskTypeConnectToTargetSQLDbSync, TaskTypeGetTDECertificatesSQL, TaskTypeGetUserTablesAzureSQLDbSync, TaskTypeGetUserTablesOracle, TaskTypeGetUserTablesPostgreSQL, TaskTypeGetUserTablesSQL, TaskTypeMigrateMongoDb, TaskTypeMigrateMySQLAzureDbForMySQLSync, TaskTypeMigrateOracleSQLSync, TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync, TaskTypeMigrateSchemaSQLServerSQLDb, TaskTypeMigrateSQLServerAzureSQLDbMI, TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS, TaskTypeMigrateSQLServerAzureSQLDbSync, TaskTypeMigrateSQLServerSQLDb, TaskTypeMigrateSsis, TaskTypeServiceCheckOCI, TaskTypeServiceInstallOCI, TaskTypeServiceUploadOCI, TaskTypeUnknown, TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI, TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS, TaskTypeValidateMigrationInputSQLServerSQLDbSync, TaskTypeValidateMongoDb, TaskTypeValidateOracleAzureDbPostgreSQLSync}
+ return []TaskType{TaskTypeConnectMongoDb, TaskTypeConnectToSourceMySQL, TaskTypeConnectToSourceOracleSync, TaskTypeConnectToSourcePostgreSQLSync, TaskTypeConnectToSourceSQLServer, TaskTypeConnectToSourceSQLServerSync, TaskTypeConnectToTargetAzureDbForMySQL, TaskTypeConnectToTargetAzureDbForPostgreSQLSync, TaskTypeConnectToTargetAzureSQLDbMI, TaskTypeConnectToTargetAzureSQLDbMISyncLRS, TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync, TaskTypeConnectToTargetSQLDb, TaskTypeConnectToTargetSQLDbSync, TaskTypeGetTDECertificatesSQL, TaskTypeGetUserTablesAzureSQLDbSync, TaskTypeGetUserTablesOracle, TaskTypeGetUserTablesPostgreSQL, TaskTypeGetUserTablesSQL, TaskTypeMigrateMongoDb, TaskTypeMigrateMySQLAzureDbForMySQLSync, TaskTypeMigrateOracleAzureDbForPostgreSQLSync, TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync, TaskTypeMigrateSchemaSQLServerSQLDb, TaskTypeMigrateSQLServerAzureSQLDbMI, TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS, TaskTypeMigrateSQLServerAzureSQLDbSync, TaskTypeMigrateSQLServerSQLDb, TaskTypeMigrateSsis, TaskTypeProjectTaskProperties, TaskTypeServiceCheckOCI, TaskTypeServiceInstallOCI, TaskTypeServiceUploadOCI, TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI, TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS, TaskTypeValidateMigrationInputSQLServerSQLDbSync, TaskTypeValidateMongoDb, TaskTypeValidateOracleAzureDbPostgreSQLSync}
}
// Type enumerates the values for type.
type Type string
const (
+ // TypeConnectionInfo ...
+ TypeConnectionInfo Type = "ConnectionInfo"
// TypeMiSQLConnectionInfo ...
TypeMiSQLConnectionInfo Type = "MiSqlConnectionInfo"
// TypeMongoDbConnectionInfo ...
@@ -1312,13 +1314,11 @@ const (
TypePostgreSQLConnectionInfo Type = "PostgreSqlConnectionInfo"
// TypeSQLConnectionInfo ...
TypeSQLConnectionInfo Type = "SqlConnectionInfo"
- // TypeUnknown ...
- TypeUnknown Type = "Unknown"
)
// PossibleTypeValues returns an array of possible values for the Type const type.
func PossibleTypeValues() []Type {
- return []Type{TypeMiSQLConnectionInfo, TypeMongoDbConnectionInfo, TypeMySQLConnectionInfo, TypeOracleConnectionInfo, TypePostgreSQLConnectionInfo, TypeSQLConnectionInfo, TypeUnknown}
+ return []Type{TypeConnectionInfo, TypeMiSQLConnectionInfo, TypeMongoDbConnectionInfo, TypeMySQLConnectionInfo, TypeOracleConnectionInfo, TypePostgreSQLConnectionInfo, TypeSQLConnectionInfo}
}
// UpdateActionType enumerates the values for update action type.
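For consumers of the 2018-07-15-preview datamigration package, the net effect of the enum hunks above is that the catch-all "Unknown" sentinels are removed in favor of the base-type names (TaskTypeProjectTaskProperties, CommandTypeCommandProperties, TypeConnectionInfo), and TaskTypeMigrateOracleSQLSync is renamed to TaskTypeMigrateOracleAzureDbForPostgreSQLSync. A minimal caller-side sketch of that migration, assuming the repository's usual import prefix github.com/Azure/azure-sdk-for-go (hypothetical example code, not part of the diff):

    package main

    import (
    	"fmt"

    	dm "github.com/Azure/azure-sdk-for-go/services/preview/datamigration/mgmt/2018-07-15-preview/datamigration"
    )

    func describe(t dm.TaskType) string {
    	switch t {
    	case dm.TaskTypeProjectTaskProperties: // was dm.TaskTypeUnknown before this change
    		return "base task (no concrete task type)"
    	case dm.TaskTypeMigrateOracleAzureDbForPostgreSQLSync: // was dm.TaskTypeMigrateOracleSQLSync
    		return "Oracle to Azure DB for PostgreSQL sync migration"
    	default:
    		return string(t)
    	}
    }

    func main() {
    	// PossibleTaskTypeValues no longer contains an "Unknown" entry.
    	for _, t := range dm.PossibleTaskTypeValues() {
    		fmt.Println(describe(t))
    	}
    }
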
@@ -1488,7 +1488,7 @@ type CheckOCIDriverTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -1794,7 +1794,7 @@ type CommandProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the command. This is ignored if submitted. Possible values include: 'Unknown', 'Accepted', 'Running', 'Succeeded', 'Failed'
State CommandState `json:"state,omitempty"`
- // CommandType - Possible values include: 'CommandTypeUnknown', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
+ // CommandType - Possible values include: 'CommandTypeCommandProperties', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
CommandType CommandType `json:"commandType,omitempty"`
}
@@ -1853,7 +1853,7 @@ func unmarshalBasicCommandPropertiesArray(body []byte) ([]BasicCommandProperties
// MarshalJSON is the custom marshaler for CommandProperties.
func (cp CommandProperties) MarshalJSON() ([]byte, error) {
- cp.CommandType = CommandTypeUnknown
+ cp.CommandType = CommandTypeCommandProperties
objectMap := make(map[string]interface{})
if cp.CommandType != "" {
objectMap["commandType"] = cp.CommandType
@@ -1930,7 +1930,7 @@ type ConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -1993,7 +1993,7 @@ func unmarshalBasicConnectionInfoArray(body []byte) ([]BasicConnectionInfo, erro
// MarshalJSON is the custom marshaler for ConnectionInfo.
func (ci ConnectionInfo) MarshalJSON() ([]byte, error) {
- ci.Type = TypeUnknown
+ ci.Type = TypeConnectionInfo
objectMap := make(map[string]interface{})
if ci.UserName != nil {
objectMap["userName"] = ci.UserName
@@ -2061,7 +2061,7 @@ type ConnectToMongoDbTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -2372,7 +2372,7 @@ type ConnectToSourceMySQLTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -2706,7 +2706,7 @@ type ConnectToSourceOracleSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -3030,7 +3030,7 @@ type ConnectToSourcePostgreSQLSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -3332,7 +3332,7 @@ type ConnectToSourceSQLServerSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -4004,7 +4004,7 @@ type ConnectToSourceSQLServerTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -4329,7 +4329,7 @@ type ConnectToTargetAzureDbForMySQLTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -4655,7 +4655,7 @@ type ConnectToTargetAzureDbForPostgreSQLSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -4986,7 +4986,7 @@ type ConnectToTargetOracleAzureDbForPostgreSQLSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -5314,7 +5314,7 @@ type ConnectToTargetSQLDbTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -5636,7 +5636,7 @@ type ConnectToTargetSQLMISyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -5968,7 +5968,7 @@ type ConnectToTargetSQLMITaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -6279,7 +6279,7 @@ type ConnectToTargetSQLSQLDbSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -7019,7 +7019,7 @@ type GetTdeCertificatesSQLTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -7341,7 +7341,7 @@ type GetUserTablesOracleTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -7663,7 +7663,7 @@ type GetUserTablesPostgreSQLTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -7997,7 +7997,7 @@ type GetUserTablesSQLSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -8323,7 +8323,7 @@ type GetUserTablesSQLTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -8635,7 +8635,7 @@ type InstallOCIDriverTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -8947,7 +8947,7 @@ type MigrateMISyncCompleteCommandProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the command. This is ignored if submitted. Possible values include: 'Unknown', 'Accepted', 'Running', 'Succeeded', 'Failed'
State CommandState `json:"state,omitempty"`
- // CommandType - Possible values include: 'CommandTypeUnknown', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
+ // CommandType - Possible values include: 'CommandTypeCommandProperties', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
CommandType CommandType `json:"commandType,omitempty"`
}
@@ -9012,7 +9012,7 @@ type MigrateMongoDbTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -9824,7 +9824,7 @@ type MigrateMySQLAzureDbForMySQLSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -10125,13 +10125,13 @@ type MigrateOracleAzureDbForPostgreSQLSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
// MarshalJSON is the custom marshaler for MigrateOracleAzureDbForPostgreSQLSyncTaskProperties.
func (moadfpsstp MigrateOracleAzureDbForPostgreSQLSyncTaskProperties) MarshalJSON() ([]byte, error) {
- moadfpsstp.TaskType = TaskTypeMigrateOracleSQLSync
+ moadfpsstp.TaskType = TaskTypeMigrateOracleAzureDbForPostgreSQLSync
objectMap := make(map[string]interface{})
if moadfpsstp.Input != nil {
objectMap["input"] = moadfpsstp.Input
@@ -11461,7 +11461,7 @@ type MigratePostgreSQLAzureDbForPostgreSQLSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -12074,7 +12074,7 @@ type MigrateSchemaSQLServerSQLDbTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -12985,7 +12985,7 @@ type MigrateSQLServerSQLDbSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -13685,7 +13685,7 @@ type MigrateSQLServerSQLDbTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -14289,7 +14289,7 @@ type MigrateSQLServerSQLMISyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -15070,7 +15070,7 @@ type MigrateSQLServerSQLMITaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -15595,7 +15595,7 @@ type MigrateSsisTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -15908,7 +15908,7 @@ type MigrateSyncCompleteCommandProperties struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the command. This is ignored if submitted. Possible values include: 'Unknown', 'Accepted', 'Running', 'Succeeded', 'Failed'
State CommandState `json:"state,omitempty"`
- // CommandType - Possible values include: 'CommandTypeUnknown', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
+ // CommandType - Possible values include: 'CommandTypeCommandProperties', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
CommandType CommandType `json:"commandType,omitempty"`
}
@@ -16066,7 +16066,7 @@ type MiSQLConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
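The command and connection-info discriminators follow the same pattern: CommandTypeUnknown is now CommandTypeCommandProperties and TypeUnknown is now TypeConnectionInfo. Another small, hypothetical sketch under the same assumed package name, using only constants listed in the hunk above:

package datamigration

// describeConnection is a hypothetical helper illustrating the renamed
// ConnectionInfo discriminator values.
func describeConnection(t Type) string {
	switch t {
	case TypeSQLConnectionInfo:
		return "SQL Server connection"
	case TypeMongoDbConnectionInfo:
		return "MongoDB connection"
	case TypeConnectionInfo: // was TypeUnknown
		return "base connection info"
	default:
		return "other connection type"
	}
}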
@@ -16137,7 +16137,7 @@ type MongoDbCancelCommand struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the command. This is ignored if submitted. Possible values include: 'Unknown', 'Accepted', 'Running', 'Succeeded', 'Failed'
State CommandState `json:"state,omitempty"`
- // CommandType - Possible values include: 'CommandTypeUnknown', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
+ // CommandType - Possible values include: 'CommandTypeCommandProperties', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
CommandType CommandType `json:"commandType,omitempty"`
}
@@ -16332,7 +16332,7 @@ type MongoDbConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -16538,7 +16538,7 @@ type MongoDbFinishCommand struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the command. This is ignored if submitted. Possible values include: 'Unknown', 'Accepted', 'Running', 'Succeeded', 'Failed'
State CommandState `json:"state,omitempty"`
- // CommandType - Possible values include: 'CommandTypeUnknown', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
+ // CommandType - Possible values include: 'CommandTypeCommandProperties', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
CommandType CommandType `json:"commandType,omitempty"`
}
@@ -16825,7 +16825,7 @@ type MongoDbRestartCommand struct {
Errors *[]ODataError `json:"errors,omitempty"`
// State - READ-ONLY; The state of the command. This is ignored if submitted. Possible values include: 'Unknown', 'Accepted', 'Running', 'Succeeded', 'Failed'
State CommandState `json:"state,omitempty"`
- // CommandType - Possible values include: 'CommandTypeUnknown', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
+ // CommandType - Possible values include: 'CommandTypeCommandProperties', 'CommandTypeMigrateSQLServerAzureDbSQLMiComplete', 'CommandTypeMigrateSyncCompleteDatabase', 'CommandTypeCancel', 'CommandTypeFinish', 'CommandTypeRestart'
CommandType CommandType `json:"commandType,omitempty"`
}
@@ -16921,7 +16921,7 @@ type MySQLConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -17088,7 +17088,7 @@ type OracleConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -17187,7 +17187,7 @@ type PostgreSQLConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -17773,7 +17773,7 @@ type ProjectTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -17813,7 +17813,7 @@ func unmarshalBasicProjectTaskProperties(body []byte) (BasicProjectTaskPropertie
var vmisssdstp ValidateMigrationInputSQLServerSQLDbSyncTaskProperties
err := json.Unmarshal(body, &vmisssdstp)
return vmisssdstp, err
- case string(TaskTypeMigrateOracleSQLSync):
+ case string(TaskTypeMigrateOracleAzureDbForPostgreSQLSync):
var moadfpsstp MigrateOracleAzureDbForPostgreSQLSyncTaskProperties
err := json.Unmarshal(body, &moadfpsstp)
return moadfpsstp, err
@@ -17956,7 +17956,7 @@ func unmarshalBasicProjectTaskPropertiesArray(body []byte) ([]BasicProjectTaskPr
// MarshalJSON is the custom marshaler for ProjectTaskProperties.
func (ptp ProjectTaskProperties) MarshalJSON() ([]byte, error) {
- ptp.TaskType = TaskTypeUnknown
+ ptp.TaskType = TaskTypeProjectTaskProperties
objectMap := make(map[string]interface{})
if ptp.ClientData != nil {
objectMap["clientData"] = ptp.ClientData
@@ -19499,7 +19499,7 @@ type SQLConnectionInfo struct {
UserName *string `json:"userName,omitempty"`
// Password - Password credential.
Password *string `json:"password,omitempty"`
- // Type - Possible values include: 'TypeUnknown', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
+ // Type - Possible values include: 'TypeConnectionInfo', 'TypeMiSQLConnectionInfo', 'TypePostgreSQLConnectionInfo', 'TypeOracleConnectionInfo', 'TypeMySQLConnectionInfo', 'TypeMongoDbConnectionInfo', 'TypeSQLConnectionInfo'
Type Type `json:"type,omitempty"`
}
@@ -19831,7 +19831,7 @@ type UploadOCIDriverTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -20133,7 +20133,7 @@ type ValidateMigrationInputSQLServerSQLDbSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -20463,7 +20463,7 @@ type ValidateMigrationInputSQLServerSQLMISyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -20805,7 +20805,7 @@ type ValidateMigrationInputSQLServerSQLMITaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -21106,7 +21106,7 @@ type ValidateMongoDbTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
@@ -21407,7 +21407,7 @@ type ValidateOracleAzureDbForPostgreSQLSyncTaskProperties struct {
Commands *[]BasicCommandProperties `json:"commands,omitempty"`
// ClientData - Key value pairs of client data to attach meta data information to task
ClientData map[string]*string `json:"clientData"`
- // TaskType - Possible values include: 'TaskTypeUnknown', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
+ // TaskType - Possible values include: 'TaskTypeProjectTaskProperties', 'TaskTypeMigrateSsis', 'TaskTypeGetTDECertificatesSQL', 'TaskTypeValidateOracleAzureDbPostgreSQLSync', 'TaskTypeValidateMongoDb', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMISyncLRS', 'TaskTypeValidateMigrationInputSQLServerAzureSQLDbMI', 'TaskTypeValidateMigrationInputSQLServerSQLDbSync', 'TaskTypeMigrateOracleAzureDbForPostgreSQLSync', 'TaskTypeMigratePostgreSQLAzureDbForPostgreSQLSync', 'TaskTypeMigrateMySQLAzureDbForMySQLSync', 'TaskTypeMigrateSQLServerAzureSQLDbSync', 'TaskTypeMigrateSQLServerSQLDb', 'TaskTypeMigrateSQLServerAzureSQLDbMISyncLRS', 'TaskTypeMigrateSQLServerAzureSQLDbMI', 'TaskTypeMigrateMongoDb', 'TaskTypeConnectToTargetAzureDbForMySQL', 'TaskTypeConnectToTargetAzureSQLDbMISyncLRS', 'TaskTypeConnectToTargetAzureSQLDbMI', 'TaskTypeGetUserTablesPostgreSQL', 'TaskTypeGetUserTablesOracle', 'TaskTypeGetUserTablesAzureSQLDbSync', 'TaskTypeGetUserTablesSQL', 'TaskTypeConnectToTargetOracleAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetAzureDbForPostgreSQLSync', 'TaskTypeConnectToTargetSQLDbSync', 'TaskTypeConnectToTargetSQLDb', 'TaskTypeConnectToSourceOracleSync', 'TaskTypeConnectToSourcePostgreSQLSync', 'TaskTypeConnectToSourceSQLServerSync', 'TaskTypeConnectToSourceSQLServer', 'TaskTypeConnectMongoDb', 'TaskTypeConnectToSourceMySQL', 'TaskTypeMigrateSchemaSQLServerSQLDb', 'TaskTypeServiceCheckOCI', 'TaskTypeServiceUploadOCI', 'TaskTypeServiceInstallOCI'
TaskType TaskType `json:"taskType,omitempty"`
}
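
The datamigration hunks above rename the sentinel discriminator values (for example `TaskTypeUnknown` becomes `TaskTypeProjectTaskProperties`, `TypeUnknown` becomes `TypeConnectionInfo`, `CommandTypeUnknown` becomes `CommandTypeCommandProperties`, and `TaskTypeMigrateOracleSQLSync` becomes `TaskTypeMigrateOracleAzureDbForPostgreSQLSync`). A minimal sketch of how consuming code might adapt to the rename, assuming the `2018-07-15-preview` datamigration import path; adjust the import to whichever datamigration API version your project actually targets:

```go
package main

import (
	"fmt"

	// Assumed import path for illustration only.
	dm "github.com/Azure/azure-sdk-for-go/services/preview/datamigration/mgmt/2018-07-15-preview/datamigration"
)

// describeTaskType switches over the renamed TaskType values.
func describeTaskType(t dm.TaskType) string {
	switch t {
	case dm.TaskTypeProjectTaskProperties: // previously dm.TaskTypeUnknown
		return "base (unspecified) task properties"
	case dm.TaskTypeMigrateOracleAzureDbForPostgreSQLSync: // previously dm.TaskTypeMigrateOracleSQLSync
		return "Oracle to Azure DB for PostgreSQL sync migration"
	default:
		return string(t)
	}
}

func main() {
	fmt.Println(describeTaskType(dm.TaskTypeMigrateOracleAzureDbForPostgreSQLSync))
}
```
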
diff --git a/services/preview/frontdoor/mgmt/2019-04-01/frontdoor/models.go b/services/preview/frontdoor/mgmt/2019-04-01/frontdoor/models.go
index 3adc55d112a2..04af43060476 100644
--- a/services/preview/frontdoor/mgmt/2019-04-01/frontdoor/models.go
+++ b/services/preview/frontdoor/mgmt/2019-04-01/frontdoor/models.go
@@ -272,11 +272,13 @@ const (
RequestMethod MatchVariable = "RequestMethod"
// RequestURI ...
RequestURI MatchVariable = "RequestUri"
+ // SocketAddr ...
+ SocketAddr MatchVariable = "SocketAddr"
)
// PossibleMatchVariableValues returns an array of possible values for the MatchVariable const type.
func PossibleMatchVariableValues() []MatchVariable {
- return []MatchVariable{Cookies, PostArgs, QueryString, RemoteAddr, RequestBody, RequestHeader, RequestMethod, RequestURI}
+ return []MatchVariable{Cookies, PostArgs, QueryString, RemoteAddr, RequestBody, RequestHeader, RequestMethod, RequestURI, SocketAddr}
}
// NetworkOperationStatus enumerates the values for network operation status.
@@ -2771,7 +2773,7 @@ type ManagedRuleSetList struct {
// MatchCondition define a match condition.
type MatchCondition struct {
- // MatchVariable - Request variable to compare with. Possible values include: 'RemoteAddr', 'RequestMethod', 'QueryString', 'PostArgs', 'RequestURI', 'RequestHeader', 'RequestBody', 'Cookies'
+ // MatchVariable - Request variable to compare with. Possible values include: 'RemoteAddr', 'RequestMethod', 'QueryString', 'PostArgs', 'RequestURI', 'RequestHeader', 'RequestBody', 'Cookies', 'SocketAddr'
MatchVariable MatchVariable `json:"matchVariable,omitempty"`
// Selector - Match against a specific key from the QueryString, PostArgs, RequestHeader or Cookies variables. Default is null.
Selector *string `json:"selector,omitempty"`
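
The 2019-04-01 frontdoor change above adds `SocketAddr` to the `MatchVariable` enum and to the documented values for `MatchCondition.MatchVariable`. A minimal sketch of selecting the new variable in a match condition; the other `MatchCondition` fields (operator, match values, and so on) are omitted here and would be required for a usable WAF rule:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/frontdoor/mgmt/2019-04-01/frontdoor"
)

func main() {
	// Match on the client socket address; SocketAddr is new in this API version.
	cond := frontdoor.MatchCondition{
		MatchVariable: frontdoor.SocketAddr,
	}
	fmt.Println(cond.MatchVariable)
}
```
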
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/backendpools.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/backendpools.go
new file mode 100644
index 000000000000..e31544da40d5
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/backendpools.go
@@ -0,0 +1,457 @@
+package frontdoor
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// BackendPoolsClient is the frontDoor Client
+type BackendPoolsClient struct {
+ BaseClient
+}
+
+// NewBackendPoolsClient creates an instance of the BackendPoolsClient client.
+func NewBackendPoolsClient(subscriptionID string) BackendPoolsClient {
+ return NewBackendPoolsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewBackendPoolsClientWithBaseURI creates an instance of the BackendPoolsClient client.
+func NewBackendPoolsClientWithBaseURI(baseURI string, subscriptionID string) BackendPoolsClient {
+ return BackendPoolsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a new Backend Pool with the specified Pool name within the specified Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// backendPoolName - name of the Backend Pool which is unique within the Front Door.
+// backendPoolParameters - backend Pool properties needed to create a new Pool.
+func (client BackendPoolsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, backendPoolName string, backendPoolParameters BackendPool) (result BackendPoolsCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BackendPoolsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: backendPoolName,
+ Constraints: []validation.Constraint{{Target: "backendPoolName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "backendPoolName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "backendPoolName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.BackendPoolsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, frontDoorName, backendPoolName, backendPoolParameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client BackendPoolsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, frontDoorName string, backendPoolName string, backendPoolParameters BackendPool) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "backendPoolName": autorest.Encode("path", backendPoolName),
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ backendPoolParameters.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/backendPools/{backendPoolName}", pathParameters),
+ autorest.WithJSON(backendPoolParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client BackendPoolsClient) CreateOrUpdateSender(req *http.Request) (future BackendPoolsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client BackendPoolsClient) CreateOrUpdateResponder(resp *http.Response) (result BackendPool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes an existing Backend Pool with the specified parameters.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// backendPoolName - name of the Backend Pool which is unique within the Front Door.
+func (client BackendPoolsClient) Delete(ctx context.Context, resourceGroupName string, frontDoorName string, backendPoolName string) (result BackendPoolsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BackendPoolsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: backendPoolName,
+ Constraints: []validation.Constraint{{Target: "backendPoolName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "backendPoolName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "backendPoolName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.BackendPoolsClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, frontDoorName, backendPoolName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client BackendPoolsClient) DeletePreparer(ctx context.Context, resourceGroupName string, frontDoorName string, backendPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "backendPoolName": autorest.Encode("path", backendPoolName),
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/backendPools/{backendPoolName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client BackendPoolsClient) DeleteSender(req *http.Request) (future BackendPoolsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client BackendPoolsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets a Backend Pool with the specified Pool name within the specified Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// backendPoolName - name of the Backend Pool which is unique within the Front Door.
+func (client BackendPoolsClient) Get(ctx context.Context, resourceGroupName string, frontDoorName string, backendPoolName string) (result BackendPool, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BackendPoolsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: backendPoolName,
+ Constraints: []validation.Constraint{{Target: "backendPoolName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "backendPoolName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "backendPoolName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.BackendPoolsClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, frontDoorName, backendPoolName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client BackendPoolsClient) GetPreparer(ctx context.Context, resourceGroupName string, frontDoorName string, backendPoolName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "backendPoolName": autorest.Encode("path", backendPoolName),
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/backendPools/{backendPoolName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client BackendPoolsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client BackendPoolsClient) GetResponder(resp *http.Response) (result BackendPool, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByFrontDoor lists all of the Backend Pools within a Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+func (client BackendPoolsClient) ListByFrontDoor(ctx context.Context, resourceGroupName string, frontDoorName string) (result BackendPoolListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BackendPoolsClient.ListByFrontDoor")
+ defer func() {
+ sc := -1
+ if result.bplr.Response.Response != nil {
+ sc = result.bplr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.BackendPoolsClient", "ListByFrontDoor", err.Error())
+ }
+
+ result.fn = client.listByFrontDoorNextResults
+ req, err := client.ListByFrontDoorPreparer(ctx, resourceGroupName, frontDoorName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "ListByFrontDoor", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByFrontDoorSender(req)
+ if err != nil {
+ result.bplr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "ListByFrontDoor", resp, "Failure sending request")
+ return
+ }
+
+ result.bplr, err = client.ListByFrontDoorResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "ListByFrontDoor", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByFrontDoorPreparer prepares the ListByFrontDoor request.
+func (client BackendPoolsClient) ListByFrontDoorPreparer(ctx context.Context, resourceGroupName string, frontDoorName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/backendPools", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByFrontDoorSender sends the ListByFrontDoor request. The method will close the
+// http.Response Body if it receives an error.
+func (client BackendPoolsClient) ListByFrontDoorSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByFrontDoorResponder handles the response to the ListByFrontDoor request. The method always
+// closes the http.Response Body.
+func (client BackendPoolsClient) ListByFrontDoorResponder(resp *http.Response) (result BackendPoolListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByFrontDoorNextResults retrieves the next set of results, if any.
+func (client BackendPoolsClient) listByFrontDoorNextResults(ctx context.Context, lastResults BackendPoolListResult) (result BackendPoolListResult, err error) {
+ req, err := lastResults.backendPoolListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "listByFrontDoorNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByFrontDoorSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "listByFrontDoorNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByFrontDoorResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsClient", "listByFrontDoorNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByFrontDoorComplete enumerates all values, automatically crossing page boundaries as required.
+func (client BackendPoolsClient) ListByFrontDoorComplete(ctx context.Context, resourceGroupName string, frontDoorName string) (result BackendPoolListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BackendPoolsClient.ListByFrontDoor")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByFrontDoor(ctx, resourceGroupName, frontDoorName)
+ return
+}
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/client.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/client.go
new file mode 100644
index 000000000000..4ae20818d416
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/client.go
@@ -0,0 +1,216 @@
+// Package frontdoor implements the Azure ARM Frontdoor service API version 2019-05-01.
+//
+// FrontDoor Client
+package frontdoor
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Frontdoor
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Frontdoor.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
+
+// CheckFrontDoorNameAvailability check the availability of a Front Door resource name.
+// Parameters:
+// checkFrontDoorNameAvailabilityInput - input to check.
+func (client BaseClient) CheckFrontDoorNameAvailability(ctx context.Context, checkFrontDoorNameAvailabilityInput CheckNameAvailabilityInput) (result CheckNameAvailabilityOutput, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CheckFrontDoorNameAvailability")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: checkFrontDoorNameAvailabilityInput,
+ Constraints: []validation.Constraint{{Target: "checkFrontDoorNameAvailabilityInput.Name", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.BaseClient", "CheckFrontDoorNameAvailability", err.Error())
+ }
+
+ req, err := client.CheckFrontDoorNameAvailabilityPreparer(ctx, checkFrontDoorNameAvailabilityInput)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BaseClient", "CheckFrontDoorNameAvailability", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CheckFrontDoorNameAvailabilitySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.BaseClient", "CheckFrontDoorNameAvailability", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CheckFrontDoorNameAvailabilityResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BaseClient", "CheckFrontDoorNameAvailability", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CheckFrontDoorNameAvailabilityPreparer prepares the CheckFrontDoorNameAvailability request.
+func (client BaseClient) CheckFrontDoorNameAvailabilityPreparer(ctx context.Context, checkFrontDoorNameAvailabilityInput CheckNameAvailabilityInput) (*http.Request, error) {
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Network/checkFrontDoorNameAvailability"),
+ autorest.WithJSON(checkFrontDoorNameAvailabilityInput),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CheckFrontDoorNameAvailabilitySender sends the CheckFrontDoorNameAvailability request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) CheckFrontDoorNameAvailabilitySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CheckFrontDoorNameAvailabilityResponder handles the response to the CheckFrontDoorNameAvailability request. The method always
+// closes the http.Response Body.
+func (client BaseClient) CheckFrontDoorNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityOutput, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// CheckFrontDoorNameAvailabilityWithSubscription check the availability of a Front Door subdomain.
+// Parameters:
+// checkFrontDoorNameAvailabilityInput - input to check.
+func (client BaseClient) CheckFrontDoorNameAvailabilityWithSubscription(ctx context.Context, checkFrontDoorNameAvailabilityInput CheckNameAvailabilityInput) (result CheckNameAvailabilityOutput, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CheckFrontDoorNameAvailabilityWithSubscription")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: checkFrontDoorNameAvailabilityInput,
+ Constraints: []validation.Constraint{{Target: "checkFrontDoorNameAvailabilityInput.Name", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.BaseClient", "CheckFrontDoorNameAvailabilityWithSubscription", err.Error())
+ }
+
+ req, err := client.CheckFrontDoorNameAvailabilityWithSubscriptionPreparer(ctx, checkFrontDoorNameAvailabilityInput)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BaseClient", "CheckFrontDoorNameAvailabilityWithSubscription", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CheckFrontDoorNameAvailabilityWithSubscriptionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.BaseClient", "CheckFrontDoorNameAvailabilityWithSubscription", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CheckFrontDoorNameAvailabilityWithSubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BaseClient", "CheckFrontDoorNameAvailabilityWithSubscription", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CheckFrontDoorNameAvailabilityWithSubscriptionPreparer prepares the CheckFrontDoorNameAvailabilityWithSubscription request.
+func (client BaseClient) CheckFrontDoorNameAvailabilityWithSubscriptionPreparer(ctx context.Context, checkFrontDoorNameAvailabilityInput CheckNameAvailabilityInput) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/checkFrontDoorNameAvailability", pathParameters),
+ autorest.WithJSON(checkFrontDoorNameAvailabilityInput),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CheckFrontDoorNameAvailabilityWithSubscriptionSender sends the CheckFrontDoorNameAvailabilityWithSubscription request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) CheckFrontDoorNameAvailabilityWithSubscriptionSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CheckFrontDoorNameAvailabilityWithSubscriptionResponder handles the response to the CheckFrontDoorNameAvailabilityWithSubscription request. The method always
+// closes the http.Response Body.
+func (client BaseClient) CheckFrontDoorNameAvailabilityWithSubscriptionResponder(resp *http.Response) (result CheckNameAvailabilityOutput, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
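+
+// checkNameExample is an illustrative editorial sketch, not generated code. It
+// assumes an Authorizer has already been attached to the client and that
+// MicrosoftNetworkfrontDoors is the resource-type constant defined in this
+// package's models; the candidate name is hypothetical.
+func checkNameExample(ctx context.Context, client BaseClient) (CheckNameAvailabilityOutput, error) {
+ name := "contoso-frontend" // hypothetical Front Door subdomain to probe
+ return client.CheckFrontDoorNameAvailability(ctx, CheckNameAvailabilityInput{
+  Name: &name,
+  Type: MicrosoftNetworkfrontDoors, // assumed resource-type enum value from this package
+ })
+}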
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/endpoints.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/endpoints.go
new file mode 100644
index 000000000000..45b3caf3f6eb
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/endpoints.go
@@ -0,0 +1,137 @@
+package frontdoor
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// EndpointsClient is the FrontDoor Client.
+type EndpointsClient struct {
+ BaseClient
+}
+
+// NewEndpointsClient creates an instance of the EndpointsClient client.
+func NewEndpointsClient(subscriptionID string) EndpointsClient {
+ return NewEndpointsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewEndpointsClientWithBaseURI creates an instance of the EndpointsClient client.
+func NewEndpointsClientWithBaseURI(baseURI string, subscriptionID string) EndpointsClient {
+ return EndpointsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// PurgeContent removes content from a Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// contentFilePaths - the paths to the content to be purged. A path can name a single file, e.g.
+// '/pictures/city.png', which removes just that file, or a directory with a wildcard, e.g. '/pictures/*',
+// which removes all folders and files in the directory.
+func (client EndpointsClient) PurgeContent(ctx context.Context, resourceGroupName string, frontDoorName string, contentFilePaths PurgeParameters) (result EndpointsPurgeContentFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EndpointsClient.PurgeContent")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: contentFilePaths,
+ Constraints: []validation.Constraint{{Target: "contentFilePaths.ContentPaths", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.EndpointsClient", "PurgeContent", err.Error())
+ }
+
+ req, err := client.PurgeContentPreparer(ctx, resourceGroupName, frontDoorName, contentFilePaths)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.EndpointsClient", "PurgeContent", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.PurgeContentSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.EndpointsClient", "PurgeContent", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// PurgeContentPreparer prepares the PurgeContent request.
+func (client EndpointsClient) PurgeContentPreparer(ctx context.Context, resourceGroupName string, frontDoorName string, contentFilePaths PurgeParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/purge", pathParameters),
+ autorest.WithJSON(contentFilePaths),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PurgeContentSender sends the PurgeContent request. The method will close the
+// http.Response Body if it receives an error.
+func (client EndpointsClient) PurgeContentSender(req *http.Request) (future EndpointsPurgeContentFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// PurgeContentResponder handles the response to the PurgeContent request. The method always
+// closes the http.Response Body.
+func (client EndpointsClient) PurgeContentResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
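+
+// purgeExample is an illustrative editorial sketch, not generated code. It
+// shows one way to issue a purge and block until the long-running operation
+// completes via the returned future; the resource group, Front Door name and
+// paths are hypothetical, and the client is assumed to carry an Authorizer.
+func purgeExample(ctx context.Context, client EndpointsClient) error {
+ paths := []string{"/pictures/city.png", "/pictures/*"} // one file plus a wildcard directory
+ future, err := client.PurgeContent(ctx, "my-rg", "my-frontdoor", PurgeParameters{ContentPaths: &paths})
+ if err != nil {
+  return err
+ }
+ // WaitForCompletionRef polls until the purge finishes or ctx is cancelled.
+ return future.WaitForCompletionRef(ctx, client.Client)
+}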
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/frontdoorapi/interfaces.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/frontdoorapi/interfaces.go
new file mode 100644
index 000000000000..abed0a7f8810
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/frontdoorapi/interfaces.go
@@ -0,0 +1,119 @@
+package frontdoorapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/preview/frontdoor/mgmt/2019-05-01/frontdoor"
+)
+
+// BaseClientAPI contains the set of methods on the BaseClient type.
+type BaseClientAPI interface {
+ CheckFrontDoorNameAvailability(ctx context.Context, checkFrontDoorNameAvailabilityInput frontdoor.CheckNameAvailabilityInput) (result frontdoor.CheckNameAvailabilityOutput, err error)
+ CheckFrontDoorNameAvailabilityWithSubscription(ctx context.Context, checkFrontDoorNameAvailabilityInput frontdoor.CheckNameAvailabilityInput) (result frontdoor.CheckNameAvailabilityOutput, err error)
+}
+
+var _ BaseClientAPI = (*frontdoor.BaseClient)(nil)
+
+// FrontDoorsClientAPI contains the set of methods on the FrontDoorsClient type.
+type FrontDoorsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, frontDoorParameters frontdoor.FrontDoor) (result frontdoor.FrontDoorsCreateOrUpdateFutureType, err error)
+ Delete(ctx context.Context, resourceGroupName string, frontDoorName string) (result frontdoor.FrontDoorsDeleteFutureType, err error)
+ Get(ctx context.Context, resourceGroupName string, frontDoorName string) (result frontdoor.FrontDoor, err error)
+ List(ctx context.Context) (result frontdoor.ListResultPage, err error)
+ ListByResourceGroup(ctx context.Context, resourceGroupName string) (result frontdoor.ListResultPage, err error)
+ ValidateCustomDomain(ctx context.Context, resourceGroupName string, frontDoorName string, customDomainProperties frontdoor.ValidateCustomDomainInput) (result frontdoor.ValidateCustomDomainOutput, err error)
+}
+
+var _ FrontDoorsClientAPI = (*frontdoor.FrontDoorsClient)(nil)
+
+// RoutingRulesClientAPI contains the set of methods on the RoutingRulesClient type.
+type RoutingRulesClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, routingRuleName string, routingRuleParameters frontdoor.RoutingRule) (result frontdoor.RoutingRulesCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, frontDoorName string, routingRuleName string) (result frontdoor.RoutingRulesDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, frontDoorName string, routingRuleName string) (result frontdoor.RoutingRule, err error)
+ ListByFrontDoor(ctx context.Context, resourceGroupName string, frontDoorName string) (result frontdoor.RoutingRuleListResultPage, err error)
+}
+
+var _ RoutingRulesClientAPI = (*frontdoor.RoutingRulesClient)(nil)
+
+// HealthProbeSettingsClientAPI contains the set of methods on the HealthProbeSettingsClient type.
+type HealthProbeSettingsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, healthProbeSettingsName string, healthProbeSettingsParameters frontdoor.HealthProbeSettingsModel) (result frontdoor.HealthProbeSettingsCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, frontDoorName string, healthProbeSettingsName string) (result frontdoor.HealthProbeSettingsDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, frontDoorName string, healthProbeSettingsName string) (result frontdoor.HealthProbeSettingsModel, err error)
+ ListByFrontDoor(ctx context.Context, resourceGroupName string, frontDoorName string) (result frontdoor.HealthProbeSettingsListResultPage, err error)
+}
+
+var _ HealthProbeSettingsClientAPI = (*frontdoor.HealthProbeSettingsClient)(nil)
+
+// LoadBalancingSettingsClientAPI contains the set of methods on the LoadBalancingSettingsClient type.
+type LoadBalancingSettingsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, loadBalancingSettingsName string, loadBalancingSettingsParameters frontdoor.LoadBalancingSettingsModel) (result frontdoor.LoadBalancingSettingsCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, frontDoorName string, loadBalancingSettingsName string) (result frontdoor.LoadBalancingSettingsDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, frontDoorName string, loadBalancingSettingsName string) (result frontdoor.LoadBalancingSettingsModel, err error)
+ ListByFrontDoor(ctx context.Context, resourceGroupName string, frontDoorName string) (result frontdoor.LoadBalancingSettingsListResultPage, err error)
+}
+
+var _ LoadBalancingSettingsClientAPI = (*frontdoor.LoadBalancingSettingsClient)(nil)
+
+// BackendPoolsClientAPI contains the set of methods on the BackendPoolsClient type.
+type BackendPoolsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, backendPoolName string, backendPoolParameters frontdoor.BackendPool) (result frontdoor.BackendPoolsCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, frontDoorName string, backendPoolName string) (result frontdoor.BackendPoolsDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, frontDoorName string, backendPoolName string) (result frontdoor.BackendPool, err error)
+ ListByFrontDoor(ctx context.Context, resourceGroupName string, frontDoorName string) (result frontdoor.BackendPoolListResultPage, err error)
+}
+
+var _ BackendPoolsClientAPI = (*frontdoor.BackendPoolsClient)(nil)
+
+// FrontendEndpointsClientAPI contains the set of methods on the FrontendEndpointsClient type.
+type FrontendEndpointsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string, frontendEndpointParameters frontdoor.FrontendEndpoint) (result frontdoor.FrontendEndpointsCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string) (result frontdoor.FrontendEndpointsDeleteFuture, err error)
+ DisableHTTPS(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string) (result frontdoor.FrontendEndpointsDisableHTTPSFuture, err error)
+ EnableHTTPS(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string, customHTTPSConfiguration frontdoor.CustomHTTPSConfiguration) (result frontdoor.FrontendEndpointsEnableHTTPSFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string) (result frontdoor.FrontendEndpoint, err error)
+ ListByFrontDoor(ctx context.Context, resourceGroupName string, frontDoorName string) (result frontdoor.FrontendEndpointsListResultPage, err error)
+}
+
+var _ FrontendEndpointsClientAPI = (*frontdoor.FrontendEndpointsClient)(nil)
+
+// EndpointsClientAPI contains the set of methods on the EndpointsClient type.
+type EndpointsClientAPI interface {
+ PurgeContent(ctx context.Context, resourceGroupName string, frontDoorName string, contentFilePaths frontdoor.PurgeParameters) (result frontdoor.EndpointsPurgeContentFuture, err error)
+}
+
+var _ EndpointsClientAPI = (*frontdoor.EndpointsClient)(nil)
+
+// PoliciesClientAPI contains the set of methods on the PoliciesClient type.
+type PoliciesClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, policyName string, parameters frontdoor.WebApplicationFirewallPolicy) (result frontdoor.PoliciesCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, policyName string) (result frontdoor.PoliciesDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, policyName string) (result frontdoor.WebApplicationFirewallPolicy, err error)
+ List(ctx context.Context, resourceGroupName string) (result frontdoor.WebApplicationFirewallPolicyListPage, err error)
+}
+
+var _ PoliciesClientAPI = (*frontdoor.PoliciesClient)(nil)
+
+// ManagedRuleSetsClientAPI contains the set of methods on the ManagedRuleSetsClient type.
+type ManagedRuleSetsClientAPI interface {
+ List(ctx context.Context) (result frontdoor.ManagedRuleSetDefinitionListPage, err error)
+}
+
+var _ ManagedRuleSetsClientAPI = (*frontdoor.ManagedRuleSetsClient)(nil)
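+
+// checkViaInterface is an illustrative editorial sketch, not generated code.
+// It shows the intent of these interfaces: helpers can depend on BaseClientAPI
+// instead of the concrete frontdoor.BaseClient, so a mock can be substituted
+// in unit tests. The helper name is hypothetical.
+func checkViaInterface(ctx context.Context, api BaseClientAPI, input frontdoor.CheckNameAvailabilityInput) (frontdoor.CheckNameAvailabilityOutput, error) {
+ // Any implementation of BaseClientAPI works here, including test doubles.
+ return api.CheckFrontDoorNameAvailability(ctx, input)
+}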
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/frontdoors.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/frontdoors.go
new file mode 100644
index 000000000000..69c824ef87ce
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/frontdoors.go
@@ -0,0 +1,637 @@
+package frontdoor
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// FrontDoorsClient is the FrontDoor Client.
+type FrontDoorsClient struct {
+ BaseClient
+}
+
+// NewFrontDoorsClient creates an instance of the FrontDoorsClient client.
+func NewFrontDoorsClient(subscriptionID string) FrontDoorsClient {
+ return NewFrontDoorsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewFrontDoorsClientWithBaseURI creates an instance of the FrontDoorsClient client.
+func NewFrontDoorsClientWithBaseURI(baseURI string, subscriptionID string) FrontDoorsClient {
+ return FrontDoorsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a new Front Door with a Front Door name under the specified subscription and resource group.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// frontDoorParameters - Front Door properties needed to create a new Front Door.
+func (client FrontDoorsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, frontDoorParameters FrontDoor) (result FrontDoorsCreateOrUpdateFutureType, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontDoorsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.FrontDoorsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, frontDoorName, frontDoorParameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client FrontDoorsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, frontDoorName string, frontDoorParameters FrontDoor) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}", pathParameters),
+ autorest.WithJSON(frontDoorParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontDoorsClient) CreateOrUpdateSender(req *http.Request) (future FrontDoorsCreateOrUpdateFutureType, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client FrontDoorsClient) CreateOrUpdateResponder(resp *http.Response) (result FrontDoor, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
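+// createOrUpdateExample is an illustrative editorial sketch, not generated
+// code. CreateOrUpdate starts a long-running operation and returns a future;
+// the sketch blocks until it completes and then reads the resource back with
+// Get. The resource names are hypothetical and the client is assumed to carry
+// an Authorizer.
+func createOrUpdateExample(ctx context.Context, client FrontDoorsClient, fd FrontDoor) (FrontDoor, error) {
+ future, err := client.CreateOrUpdate(ctx, "my-rg", "my-frontdoor", fd)
+ if err != nil {
+  return FrontDoor{}, err
+ }
+ if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+  return FrontDoor{}, err
+ }
+ return client.Get(ctx, "my-rg", "my-frontdoor")
+}
+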
+// Delete deletes an existing Front Door with the specified parameters.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+func (client FrontDoorsClient) Delete(ctx context.Context, resourceGroupName string, frontDoorName string) (result FrontDoorsDeleteFutureType, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontDoorsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.FrontDoorsClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, frontDoorName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client FrontDoorsClient) DeletePreparer(ctx context.Context, resourceGroupName string, frontDoorName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontDoorsClient) DeleteSender(req *http.Request) (future FrontDoorsDeleteFutureType, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client FrontDoorsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets a Front Door with the specified Front Door name under the specified subscription and resource group.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+func (client FrontDoorsClient) Get(ctx context.Context, resourceGroupName string, frontDoorName string) (result FrontDoor, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontDoorsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.FrontDoorsClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, frontDoorName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client FrontDoorsClient) GetPreparer(ctx context.Context, resourceGroupName string, frontDoorName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontDoorsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client FrontDoorsClient) GetResponder(resp *http.Response) (result FrontDoor, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all of the Front Doors within an Azure subscription.
+func (client FrontDoorsClient) List(ctx context.Context) (result ListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontDoorsClient.List")
+ defer func() {
+ sc := -1
+ if result.lr.Response.Response != nil {
+ sc = result.lr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.lr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.lr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client FrontDoorsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/frontDoors", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontDoorsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client FrontDoorsClient) ListResponder(resp *http.Response) (result ListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client FrontDoorsClient) listNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) {
+ req, err := lastResults.listResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client FrontDoorsClient) ListComplete(ctx context.Context) (result ListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontDoorsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
+
+// ListByResourceGroup lists all of the Front Doors within a resource group under a subscription.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+func (client FrontDoorsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontDoorsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.lr.Response.Response != nil {
+ sc = result.lr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.FrontDoorsClient", "ListByResourceGroup", err.Error())
+ }
+
+ result.fn = client.listByResourceGroupNextResults
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.lr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.lr, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client FrontDoorsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontDoorsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client FrontDoorsClient) ListByResourceGroupResponder(resp *http.Response) (result ListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client FrontDoorsClient) listByResourceGroupNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) {
+ req, err := lastResults.listResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client FrontDoorsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontDoorsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
+ return
+}
+
+// ValidateCustomDomain validates the custom domain mapping to ensure it maps to the correct Front Door endpoint in
+// DNS.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// customDomainProperties - custom domain to be validated.
+func (client FrontDoorsClient) ValidateCustomDomain(ctx context.Context, resourceGroupName string, frontDoorName string, customDomainProperties ValidateCustomDomainInput) (result ValidateCustomDomainOutput, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontDoorsClient.ValidateCustomDomain")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: customDomainProperties,
+ Constraints: []validation.Constraint{{Target: "customDomainProperties.HostName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.FrontDoorsClient", "ValidateCustomDomain", err.Error())
+ }
+
+ req, err := client.ValidateCustomDomainPreparer(ctx, resourceGroupName, frontDoorName, customDomainProperties)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "ValidateCustomDomain", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ValidateCustomDomainSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "ValidateCustomDomain", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ValidateCustomDomainResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsClient", "ValidateCustomDomain", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ValidateCustomDomainPreparer prepares the ValidateCustomDomain request.
+func (client FrontDoorsClient) ValidateCustomDomainPreparer(ctx context.Context, resourceGroupName string, frontDoorName string, customDomainProperties ValidateCustomDomainInput) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/validateCustomDomain", pathParameters),
+ autorest.WithJSON(customDomainProperties),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ValidateCustomDomainSender sends the ValidateCustomDomain request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontDoorsClient) ValidateCustomDomainSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ValidateCustomDomainResponder handles the response to the ValidateCustomDomain request. The method always
+// closes the http.Response Body.
+func (client FrontDoorsClient) ValidateCustomDomainResponder(resp *http.Response) (result ValidateCustomDomainOutput, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
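+
+// listAllExample is an illustrative editorial sketch, not generated code. It
+// shows how the iterator returned by ListComplete walks every page of results;
+// the client is assumed to carry an Authorizer.
+func listAllExample(ctx context.Context, client FrontDoorsClient) ([]FrontDoor, error) {
+ it, err := client.ListComplete(ctx)
+ if err != nil {
+  return nil, err
+ }
+ var all []FrontDoor
+ for it.NotDone() {
+  all = append(all, it.Value())
+  // NextWithContext fetches the next page transparently when needed.
+  if err := it.NextWithContext(ctx); err != nil {
+   return nil, err
+  }
+ }
+ return all, nil
+}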
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/frontendendpoints.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/frontendendpoints.go
new file mode 100644
index 000000000000..6dbff638e20a
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/frontendendpoints.go
@@ -0,0 +1,655 @@
+package frontdoor
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// FrontendEndpointsClient is the FrontDoor Client.
+type FrontendEndpointsClient struct {
+ BaseClient
+}
+
+// NewFrontendEndpointsClient creates an instance of the FrontendEndpointsClient client.
+func NewFrontendEndpointsClient(subscriptionID string) FrontendEndpointsClient {
+ return NewFrontendEndpointsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewFrontendEndpointsClientWithBaseURI creates an instance of the FrontendEndpointsClient client.
+func NewFrontendEndpointsClientWithBaseURI(baseURI string, subscriptionID string) FrontendEndpointsClient {
+ return FrontendEndpointsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a new frontend endpoint with the specified host name within the specified Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// frontendEndpointName - name of the Frontend endpoint which is unique within the Front Door.
+// frontendEndpointParameters - frontend endpoint properties needed to create a new endpoint.
+func (client FrontendEndpointsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string, frontendEndpointParameters FrontendEndpoint) (result FrontendEndpointsCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontendEndpointsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: frontendEndpointName,
+ Constraints: []validation.Constraint{{Target: "frontendEndpointName", Name: validation.MaxLength, Rule: 255, Chain: nil},
+ {Target: "frontendEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "frontendEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: frontendEndpointParameters,
+ Constraints: []validation.Constraint{{Target: "frontendEndpointParameters.FrontendEndpointProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "frontendEndpointParameters.FrontendEndpointProperties.CustomHTTPSConfiguration", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "frontendEndpointParameters.FrontendEndpointProperties.CustomHTTPSConfiguration.ProtocolType", Name: validation.Null, Rule: true, Chain: nil}}},
+ }}}}}); err != nil {
+ return result, validation.NewError("frontdoor.FrontendEndpointsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, frontDoorName, frontendEndpointName, frontendEndpointParameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client FrontendEndpointsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string, frontendEndpointParameters FrontendEndpoint) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "frontendEndpointName": autorest.Encode("path", frontendEndpointName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ frontendEndpointParameters.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}", pathParameters),
+ autorest.WithJSON(frontendEndpointParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontendEndpointsClient) CreateOrUpdateSender(req *http.Request) (future FrontendEndpointsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client FrontendEndpointsClient) CreateOrUpdateResponder(resp *http.Response) (result FrontendEndpoint, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
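+// createEndpointExample is an illustrative editorial sketch, not generated
+// code. It starts the long-running CreateOrUpdate and waits on the returned
+// future; the resource names are hypothetical, the endpoint model is supplied
+// by the caller, and the client is assumed to carry an Authorizer.
+func createEndpointExample(ctx context.Context, client FrontendEndpointsClient, endpoint FrontendEndpoint) error {
+ future, err := client.CreateOrUpdate(ctx, "my-rg", "my-frontdoor", "my-endpoint", endpoint)
+ if err != nil {
+  return err
+ }
+ return future.WaitForCompletionRef(ctx, client.Client)
+}
+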
+// Delete deletes an existing frontend endpoint with the specified parameters.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// frontendEndpointName - name of the Frontend endpoint which is unique within the Front Door.
+func (client FrontendEndpointsClient) Delete(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string) (result FrontendEndpointsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontendEndpointsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: frontendEndpointName,
+ Constraints: []validation.Constraint{{Target: "frontendEndpointName", Name: validation.MaxLength, Rule: 255, Chain: nil},
+ {Target: "frontendEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "frontendEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.FrontendEndpointsClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, frontDoorName, frontendEndpointName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client FrontendEndpointsClient) DeletePreparer(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "frontendEndpointName": autorest.Encode("path", frontendEndpointName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontendEndpointsClient) DeleteSender(req *http.Request) (future FrontendEndpointsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client FrontendEndpointsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// DisableHTTPS disables a frontendEndpoint for HTTPS traffic.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// frontendEndpointName - name of the Frontend endpoint which is unique within the Front Door.
+func (client FrontendEndpointsClient) DisableHTTPS(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string) (result FrontendEndpointsDisableHTTPSFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontendEndpointsClient.DisableHTTPS")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: frontendEndpointName,
+ Constraints: []validation.Constraint{{Target: "frontendEndpointName", Name: validation.MaxLength, Rule: 255, Chain: nil},
+ {Target: "frontendEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "frontendEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.FrontendEndpointsClient", "DisableHTTPS", err.Error())
+ }
+
+ req, err := client.DisableHTTPSPreparer(ctx, resourceGroupName, frontDoorName, frontendEndpointName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "DisableHTTPS", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DisableHTTPSSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "DisableHTTPS", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DisableHTTPSPreparer prepares the DisableHTTPS request.
+func (client FrontendEndpointsClient) DisableHTTPSPreparer(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "frontendEndpointName": autorest.Encode("path", frontendEndpointName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}/disableHttps", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DisableHTTPSSender sends the DisableHTTPS request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontendEndpointsClient) DisableHTTPSSender(req *http.Request) (future FrontendEndpointsDisableHTTPSFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DisableHTTPSResponder handles the response to the DisableHTTPS request. The method always
+// closes the http.Response Body.
+func (client FrontendEndpointsClient) DisableHTTPSResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// EnableHTTPS enables a frontendEndpoint for HTTPS traffic.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// frontendEndpointName - name of the Frontend endpoint which is unique within the Front Door.
+// customHTTPSConfiguration - the configuration specifying how to enable HTTPS.
+func (client FrontendEndpointsClient) EnableHTTPS(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string, customHTTPSConfiguration CustomHTTPSConfiguration) (result FrontendEndpointsEnableHTTPSFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontendEndpointsClient.EnableHTTPS")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: frontendEndpointName,
+ Constraints: []validation.Constraint{{Target: "frontendEndpointName", Name: validation.MaxLength, Rule: 255, Chain: nil},
+ {Target: "frontendEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "frontendEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: customHTTPSConfiguration,
+ Constraints: []validation.Constraint{{Target: "customHTTPSConfiguration.ProtocolType", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.FrontendEndpointsClient", "EnableHTTPS", err.Error())
+ }
+
+ req, err := client.EnableHTTPSPreparer(ctx, resourceGroupName, frontDoorName, frontendEndpointName, customHTTPSConfiguration)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "EnableHTTPS", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.EnableHTTPSSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "EnableHTTPS", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// EnableHTTPSPreparer prepares the EnableHTTPS request.
+func (client FrontendEndpointsClient) EnableHTTPSPreparer(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string, customHTTPSConfiguration CustomHTTPSConfiguration) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "frontendEndpointName": autorest.Encode("path", frontendEndpointName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}/enableHttps", pathParameters),
+ autorest.WithJSON(customHTTPSConfiguration),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// EnableHTTPSSender sends the EnableHTTPS request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontendEndpointsClient) EnableHTTPSSender(req *http.Request) (future FrontendEndpointsEnableHTTPSFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// EnableHTTPSResponder handles the response to the EnableHTTPS request. The method always
+// closes the http.Response Body.
+func (client FrontendEndpointsClient) EnableHTTPSResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets a Frontend endpoint with the specified name within the specified Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// frontendEndpointName - name of the Frontend endpoint which is unique within the Front Door.
+func (client FrontendEndpointsClient) Get(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string) (result FrontendEndpoint, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontendEndpointsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: frontendEndpointName,
+ Constraints: []validation.Constraint{{Target: "frontendEndpointName", Name: validation.MaxLength, Rule: 255, Chain: nil},
+ {Target: "frontendEndpointName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "frontendEndpointName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.FrontendEndpointsClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, frontDoorName, frontendEndpointName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client FrontendEndpointsClient) GetPreparer(ctx context.Context, resourceGroupName string, frontDoorName string, frontendEndpointName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "frontendEndpointName": autorest.Encode("path", frontendEndpointName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints/{frontendEndpointName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontendEndpointsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client FrontendEndpointsClient) GetResponder(resp *http.Response) (result FrontendEndpoint, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByFrontDoor lists all of the frontend endpoints within a Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+func (client FrontendEndpointsClient) ListByFrontDoor(ctx context.Context, resourceGroupName string, frontDoorName string) (result FrontendEndpointsListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontendEndpointsClient.ListByFrontDoor")
+ defer func() {
+ sc := -1
+ if result.felr.Response.Response != nil {
+ sc = result.felr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.FrontendEndpointsClient", "ListByFrontDoor", err.Error())
+ }
+
+ result.fn = client.listByFrontDoorNextResults
+ req, err := client.ListByFrontDoorPreparer(ctx, resourceGroupName, frontDoorName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "ListByFrontDoor", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByFrontDoorSender(req)
+ if err != nil {
+ result.felr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "ListByFrontDoor", resp, "Failure sending request")
+ return
+ }
+
+ result.felr, err = client.ListByFrontDoorResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "ListByFrontDoor", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByFrontDoorPreparer prepares the ListByFrontDoor request.
+func (client FrontendEndpointsClient) ListByFrontDoorPreparer(ctx context.Context, resourceGroupName string, frontDoorName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/frontendEndpoints", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByFrontDoorSender sends the ListByFrontDoor request. The method will close the
+// http.Response Body if it receives an error.
+func (client FrontendEndpointsClient) ListByFrontDoorSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByFrontDoorResponder handles the response to the ListByFrontDoor request. The method always
+// closes the http.Response Body.
+func (client FrontendEndpointsClient) ListByFrontDoorResponder(resp *http.Response) (result FrontendEndpointsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByFrontDoorNextResults retrieves the next set of results, if any.
+func (client FrontendEndpointsClient) listByFrontDoorNextResults(ctx context.Context, lastResults FrontendEndpointsListResult) (result FrontendEndpointsListResult, err error) {
+ req, err := lastResults.frontendEndpointsListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "listByFrontDoorNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByFrontDoorSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "listByFrontDoorNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByFrontDoorResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsClient", "listByFrontDoorNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByFrontDoorComplete enumerates all values, automatically crossing page boundaries as required.
+func (client FrontendEndpointsClient) ListByFrontDoorComplete(ctx context.Context, resourceGroupName string, frontDoorName string) (result FrontendEndpointsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontendEndpointsClient.ListByFrontDoor")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByFrontDoor(ctx, resourceGroupName, frontDoorName)
+ return
+}
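+
+// enableHTTPSExample is an illustrative sketch, not generated code: it shows
+// the typical caller-side flow for the long-running EnableHTTPS operation
+// defined above, assuming the returned future follows the standard generated
+// WaitForCompletionRef/Result pattern. The authorizer and the subscription,
+// resource group, Front Door, and frontend endpoint names are placeholders
+// supplied by the caller, and customHTTPSConfiguration must have ProtocolType
+// set (see the validation in EnableHTTPS).
+func enableHTTPSExample(ctx context.Context, authorizer autorest.Authorizer, subscriptionID, resourceGroupName, frontDoorName, frontendEndpointName string, customHTTPSConfiguration CustomHTTPSConfiguration) error {
+	client := NewFrontendEndpointsClient(subscriptionID)
+	client.Authorizer = authorizer
+
+	// EnableHTTPS returns once the service has accepted the request;
+	// certificate provisioning continues asynchronously.
+	future, err := client.EnableHTTPS(ctx, resourceGroupName, frontDoorName, frontendEndpointName, customHTTPSConfiguration)
+	if err != nil {
+		return err
+	}
+	// Poll until the operation reaches a terminal state.
+	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+		return err
+	}
+	_, err = future.Result(client)
+	return err
+}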
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/healthprobesettings.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/healthprobesettings.go
new file mode 100644
index 000000000000..3174cd47a673
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/healthprobesettings.go
@@ -0,0 +1,457 @@
+package frontdoor
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// HealthProbeSettingsClient is the frontDoor Client.
+type HealthProbeSettingsClient struct {
+ BaseClient
+}
+
+// NewHealthProbeSettingsClient creates an instance of the HealthProbeSettingsClient client.
+func NewHealthProbeSettingsClient(subscriptionID string) HealthProbeSettingsClient {
+ return NewHealthProbeSettingsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewHealthProbeSettingsClientWithBaseURI creates an instance of the HealthProbeSettingsClient client.
+func NewHealthProbeSettingsClientWithBaseURI(baseURI string, subscriptionID string) HealthProbeSettingsClient {
+ return HealthProbeSettingsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a new HealthProbeSettings with the specified name within the specified Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// healthProbeSettingsName - name of the health probe settings which is unique within the Front Door.
+// healthProbeSettingsParameters - healthProbeSettings properties needed to create new health probe settings within the Front Door.
+func (client HealthProbeSettingsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, healthProbeSettingsName string, healthProbeSettingsParameters HealthProbeSettingsModel) (result HealthProbeSettingsCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/HealthProbeSettingsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: healthProbeSettingsName,
+ Constraints: []validation.Constraint{{Target: "healthProbeSettingsName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "healthProbeSettingsName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "healthProbeSettingsName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.HealthProbeSettingsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, frontDoorName, healthProbeSettingsName, healthProbeSettingsParameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client HealthProbeSettingsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, frontDoorName string, healthProbeSettingsName string, healthProbeSettingsParameters HealthProbeSettingsModel) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "healthProbeSettingsName": autorest.Encode("path", healthProbeSettingsName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ healthProbeSettingsParameters.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/healthProbeSettings/{healthProbeSettingsName}", pathParameters),
+ autorest.WithJSON(healthProbeSettingsParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client HealthProbeSettingsClient) CreateOrUpdateSender(req *http.Request) (future HealthProbeSettingsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client HealthProbeSettingsClient) CreateOrUpdateResponder(resp *http.Response) (result HealthProbeSettingsModel, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes an existing HealthProbeSettings with the specified parameters.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// healthProbeSettingsName - name of the health probe settings which is unique within the Front Door.
+func (client HealthProbeSettingsClient) Delete(ctx context.Context, resourceGroupName string, frontDoorName string, healthProbeSettingsName string) (result HealthProbeSettingsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/HealthProbeSettingsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: healthProbeSettingsName,
+ Constraints: []validation.Constraint{{Target: "healthProbeSettingsName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "healthProbeSettingsName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "healthProbeSettingsName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.HealthProbeSettingsClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, frontDoorName, healthProbeSettingsName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client HealthProbeSettingsClient) DeletePreparer(ctx context.Context, resourceGroupName string, frontDoorName string, healthProbeSettingsName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "healthProbeSettingsName": autorest.Encode("path", healthProbeSettingsName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/healthProbeSettings/{healthProbeSettingsName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client HealthProbeSettingsClient) DeleteSender(req *http.Request) (future HealthProbeSettingsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client HealthProbeSettingsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets a HealthProbeSettings with the specified name within the specified Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// healthProbeSettingsName - name of the health probe settings which is unique within the Front Door.
+func (client HealthProbeSettingsClient) Get(ctx context.Context, resourceGroupName string, frontDoorName string, healthProbeSettingsName string) (result HealthProbeSettingsModel, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/HealthProbeSettingsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: healthProbeSettingsName,
+ Constraints: []validation.Constraint{{Target: "healthProbeSettingsName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "healthProbeSettingsName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "healthProbeSettingsName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.HealthProbeSettingsClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, frontDoorName, healthProbeSettingsName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client HealthProbeSettingsClient) GetPreparer(ctx context.Context, resourceGroupName string, frontDoorName string, healthProbeSettingsName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "healthProbeSettingsName": autorest.Encode("path", healthProbeSettingsName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/healthProbeSettings/{healthProbeSettingsName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client HealthProbeSettingsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client HealthProbeSettingsClient) GetResponder(resp *http.Response) (result HealthProbeSettingsModel, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByFrontDoor lists all of the HealthProbeSettings within a Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+func (client HealthProbeSettingsClient) ListByFrontDoor(ctx context.Context, resourceGroupName string, frontDoorName string) (result HealthProbeSettingsListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/HealthProbeSettingsClient.ListByFrontDoor")
+ defer func() {
+ sc := -1
+ if result.hpslr.Response.Response != nil {
+ sc = result.hpslr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.HealthProbeSettingsClient", "ListByFrontDoor", err.Error())
+ }
+
+ result.fn = client.listByFrontDoorNextResults
+ req, err := client.ListByFrontDoorPreparer(ctx, resourceGroupName, frontDoorName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "ListByFrontDoor", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByFrontDoorSender(req)
+ if err != nil {
+ result.hpslr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "ListByFrontDoor", resp, "Failure sending request")
+ return
+ }
+
+ result.hpslr, err = client.ListByFrontDoorResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "ListByFrontDoor", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByFrontDoorPreparer prepares the ListByFrontDoor request.
+func (client HealthProbeSettingsClient) ListByFrontDoorPreparer(ctx context.Context, resourceGroupName string, frontDoorName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/healthProbeSettings", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByFrontDoorSender sends the ListByFrontDoor request. The method will close the
+// http.Response Body if it receives an error.
+func (client HealthProbeSettingsClient) ListByFrontDoorSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByFrontDoorResponder handles the response to the ListByFrontDoor request. The method always
+// closes the http.Response Body.
+func (client HealthProbeSettingsClient) ListByFrontDoorResponder(resp *http.Response) (result HealthProbeSettingsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByFrontDoorNextResults retrieves the next set of results, if any.
+func (client HealthProbeSettingsClient) listByFrontDoorNextResults(ctx context.Context, lastResults HealthProbeSettingsListResult) (result HealthProbeSettingsListResult, err error) {
+ req, err := lastResults.healthProbeSettingsListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "listByFrontDoorNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByFrontDoorSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "listByFrontDoorNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByFrontDoorResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsClient", "listByFrontDoorNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByFrontDoorComplete enumerates all values, automatically crossing page boundaries as required.
+func (client HealthProbeSettingsClient) ListByFrontDoorComplete(ctx context.Context, resourceGroupName string, frontDoorName string) (result HealthProbeSettingsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/HealthProbeSettingsClient.ListByFrontDoor")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByFrontDoor(ctx, resourceGroupName, frontDoorName)
+ return
+}
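+
+// listHealthProbeSettingsExample is an illustrative sketch, not generated
+// code: it pages through every health probe setting in a Front Door using the
+// ListByFrontDoorComplete iterator defined above, assuming the iterator
+// exposes the standard generated NotDone/Value/NextWithContext methods. The
+// authorizer and the subscription, resource group, and Front Door names are
+// placeholders supplied by the caller.
+func listHealthProbeSettingsExample(ctx context.Context, authorizer autorest.Authorizer, subscriptionID, resourceGroupName, frontDoorName string) ([]HealthProbeSettingsModel, error) {
+	client := NewHealthProbeSettingsClient(subscriptionID)
+	client.Authorizer = authorizer
+
+	// The iterator transparently follows nextLink pages.
+	iter, err := client.ListByFrontDoorComplete(ctx, resourceGroupName, frontDoorName)
+	if err != nil {
+		return nil, err
+	}
+
+	var settings []HealthProbeSettingsModel
+	for iter.NotDone() {
+		settings = append(settings, iter.Value())
+		if err := iter.NextWithContext(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return settings, nil
+}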
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/loadbalancingsettings.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/loadbalancingsettings.go
new file mode 100644
index 000000000000..7068baf3e4f3
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/loadbalancingsettings.go
@@ -0,0 +1,457 @@
+package frontdoor
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// LoadBalancingSettingsClient is the frontDoor Client.
+type LoadBalancingSettingsClient struct {
+ BaseClient
+}
+
+// NewLoadBalancingSettingsClient creates an instance of the LoadBalancingSettingsClient client.
+func NewLoadBalancingSettingsClient(subscriptionID string) LoadBalancingSettingsClient {
+ return NewLoadBalancingSettingsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewLoadBalancingSettingsClientWithBaseURI creates an instance of the LoadBalancingSettingsClient client.
+func NewLoadBalancingSettingsClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancingSettingsClient {
+ return LoadBalancingSettingsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a new LoadBalancingSettings with the specified name within the specified Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// loadBalancingSettingsName - name of the load balancing settings which is unique within the Front Door.
+// loadBalancingSettingsParameters - loadBalancingSettings properties needed to create new load balancing settings within the Front Door.
+func (client LoadBalancingSettingsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, loadBalancingSettingsName string, loadBalancingSettingsParameters LoadBalancingSettingsModel) (result LoadBalancingSettingsCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancingSettingsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: loadBalancingSettingsName,
+ Constraints: []validation.Constraint{{Target: "loadBalancingSettingsName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "loadBalancingSettingsName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "loadBalancingSettingsName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.LoadBalancingSettingsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, frontDoorName, loadBalancingSettingsName, loadBalancingSettingsParameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client LoadBalancingSettingsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, frontDoorName string, loadBalancingSettingsName string, loadBalancingSettingsParameters LoadBalancingSettingsModel) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "loadBalancingSettingsName": autorest.Encode("path", loadBalancingSettingsName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ loadBalancingSettingsParameters.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/loadBalancingSettings/{loadBalancingSettingsName}", pathParameters),
+ autorest.WithJSON(loadBalancingSettingsParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancingSettingsClient) CreateOrUpdateSender(req *http.Request) (future LoadBalancingSettingsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client LoadBalancingSettingsClient) CreateOrUpdateResponder(resp *http.Response) (result LoadBalancingSettingsModel, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes an existing LoadBalancingSettings with the specified parameters.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// loadBalancingSettingsName - name of the load balancing settings which is unique within the Front Door.
+func (client LoadBalancingSettingsClient) Delete(ctx context.Context, resourceGroupName string, frontDoorName string, loadBalancingSettingsName string) (result LoadBalancingSettingsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancingSettingsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: loadBalancingSettingsName,
+ Constraints: []validation.Constraint{{Target: "loadBalancingSettingsName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "loadBalancingSettingsName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "loadBalancingSettingsName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.LoadBalancingSettingsClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, frontDoorName, loadBalancingSettingsName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client LoadBalancingSettingsClient) DeletePreparer(ctx context.Context, resourceGroupName string, frontDoorName string, loadBalancingSettingsName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "loadBalancingSettingsName": autorest.Encode("path", loadBalancingSettingsName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/loadBalancingSettings/{loadBalancingSettingsName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancingSettingsClient) DeleteSender(req *http.Request) (future LoadBalancingSettingsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client LoadBalancingSettingsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
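+
+// Illustrative (non-generated) usage of the Delete long-running operation. The resource names
+// below are hypothetical placeholders, the client is assumed to have been created with the
+// package's NewLoadBalancingSettingsClient constructor (same pattern as NewManagedRuleSetsClient
+// further down in this change), and an autorest.Authorizer is assumed to be available, e.g. from
+// github.com/Azure/go-autorest/autorest/azure/auth.
+//
+//   client := frontdoor.NewLoadBalancingSettingsClient("<subscription id>")
+//   client.Authorizer = authorizer
+//   future, err := client.Delete(ctx, "myResourceGroup", "myFrontDoor", "myLoadBalancingSettings")
+//   if err != nil {
+//       // handle the prepare/send error
+//   }
+//   // Block until the service finishes the delete (or ctx is cancelled).
+//   if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+//       // handle the polling error
+//   }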
+
+// Get gets a LoadBalancingSettings with the specified name within the specified Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// loadBalancingSettingsName - name of the load balancing settings which is unique within the Front Door.
+func (client LoadBalancingSettingsClient) Get(ctx context.Context, resourceGroupName string, frontDoorName string, loadBalancingSettingsName string) (result LoadBalancingSettingsModel, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancingSettingsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: loadBalancingSettingsName,
+ Constraints: []validation.Constraint{{Target: "loadBalancingSettingsName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "loadBalancingSettingsName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "loadBalancingSettingsName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.LoadBalancingSettingsClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, frontDoorName, loadBalancingSettingsName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client LoadBalancingSettingsClient) GetPreparer(ctx context.Context, resourceGroupName string, frontDoorName string, loadBalancingSettingsName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "loadBalancingSettingsName": autorest.Encode("path", loadBalancingSettingsName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/loadBalancingSettings/{loadBalancingSettingsName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancingSettingsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client LoadBalancingSettingsClient) GetResponder(resp *http.Response) (result LoadBalancingSettingsModel, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
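+
+// Illustrative (non-generated) usage of Get; the resource names are hypothetical and the client
+// is assumed to be constructed and authorized as in the Delete example above.
+//
+//   settings, err := client.Get(ctx, "myResourceGroup", "myFrontDoor", "myLoadBalancingSettings")
+//   if err != nil {
+//       // handle error
+//   }
+//   _ = settings // LoadBalancingSettingsModel; inspect its properties as needed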
+
+// ListByFrontDoor lists all of the LoadBalancingSettings within a Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+func (client LoadBalancingSettingsClient) ListByFrontDoor(ctx context.Context, resourceGroupName string, frontDoorName string) (result LoadBalancingSettingsListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancingSettingsClient.ListByFrontDoor")
+ defer func() {
+ sc := -1
+ if result.lbslr.Response.Response != nil {
+ sc = result.lbslr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.LoadBalancingSettingsClient", "ListByFrontDoor", err.Error())
+ }
+
+ result.fn = client.listByFrontDoorNextResults
+ req, err := client.ListByFrontDoorPreparer(ctx, resourceGroupName, frontDoorName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "ListByFrontDoor", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByFrontDoorSender(req)
+ if err != nil {
+ result.lbslr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "ListByFrontDoor", resp, "Failure sending request")
+ return
+ }
+
+ result.lbslr, err = client.ListByFrontDoorResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "ListByFrontDoor", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByFrontDoorPreparer prepares the ListByFrontDoor request.
+func (client LoadBalancingSettingsClient) ListByFrontDoorPreparer(ctx context.Context, resourceGroupName string, frontDoorName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/loadBalancingSettings", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByFrontDoorSender sends the ListByFrontDoor request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancingSettingsClient) ListByFrontDoorSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByFrontDoorResponder handles the response to the ListByFrontDoor request. The method always
+// closes the http.Response Body.
+func (client LoadBalancingSettingsClient) ListByFrontDoorResponder(resp *http.Response) (result LoadBalancingSettingsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByFrontDoorNextResults retrieves the next set of results, if any.
+func (client LoadBalancingSettingsClient) listByFrontDoorNextResults(ctx context.Context, lastResults LoadBalancingSettingsListResult) (result LoadBalancingSettingsListResult, err error) {
+ req, err := lastResults.loadBalancingSettingsListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "listByFrontDoorNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByFrontDoorSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "listByFrontDoorNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByFrontDoorResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsClient", "listByFrontDoorNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByFrontDoorComplete enumerates all values, automatically crossing page boundaries as required.
+func (client LoadBalancingSettingsClient) ListByFrontDoorComplete(ctx context.Context, resourceGroupName string, frontDoorName string) (result LoadBalancingSettingsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancingSettingsClient.ListByFrontDoor")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByFrontDoor(ctx, resourceGroupName, frontDoorName)
+ return
+}
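+
+// Illustrative (non-generated) enumeration of every load balancing settings object of a Front
+// Door. The resource names are hypothetical; the iterator follows the same
+// NotDone/Value/NextWithContext pattern as the iterators defined in models.go.
+//
+//   iter, err := client.ListByFrontDoorComplete(ctx, "myResourceGroup", "myFrontDoor")
+//   if err != nil {
+//       // handle error
+//   }
+//   for iter.NotDone() {
+//       item := iter.Value()
+//       _ = item
+//       if err := iter.NextWithContext(ctx); err != nil {
+//           // handle paging error
+//           break
+//       }
+//   }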
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/managedrulesets.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/managedrulesets.go
new file mode 100644
index 000000000000..131f3c52f5ef
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/managedrulesets.go
@@ -0,0 +1,151 @@
+package frontdoor
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ManagedRuleSetsClient is the FrontDoor client for managed rule set operations.
+type ManagedRuleSetsClient struct {
+ BaseClient
+}
+
+// NewManagedRuleSetsClient creates an instance of the ManagedRuleSetsClient client.
+func NewManagedRuleSetsClient(subscriptionID string) ManagedRuleSetsClient {
+ return NewManagedRuleSetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewManagedRuleSetsClientWithBaseURI creates an instance of the ManagedRuleSetsClient client.
+func NewManagedRuleSetsClientWithBaseURI(baseURI string, subscriptionID string) ManagedRuleSetsClient {
+ return ManagedRuleSetsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists all available managed rule sets.
+func (client ManagedRuleSetsClient) List(ctx context.Context) (result ManagedRuleSetDefinitionListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedRuleSetsClient.List")
+ defer func() {
+ sc := -1
+ if result.mrsdl.Response.Response != nil {
+ sc = result.mrsdl.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.ManagedRuleSetsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.mrsdl.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.ManagedRuleSetsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.mrsdl, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.ManagedRuleSetsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ManagedRuleSetsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallManagedRuleSets", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedRuleSetsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ManagedRuleSetsClient) ListResponder(resp *http.Response) (result ManagedRuleSetDefinitionList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client ManagedRuleSetsClient) listNextResults(ctx context.Context, lastResults ManagedRuleSetDefinitionList) (result ManagedRuleSetDefinitionList, err error) {
+ req, err := lastResults.managedRuleSetDefinitionListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "frontdoor.ManagedRuleSetsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "frontdoor.ManagedRuleSetsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.ManagedRuleSetsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ManagedRuleSetsClient) ListComplete(ctx context.Context) (result ManagedRuleSetDefinitionListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedRuleSetsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
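+
+// Illustrative (non-generated) enumeration of all managed rule set definitions. The client is
+// created with NewManagedRuleSetsClient above; the authorizer variable is assumed to exist, and
+// the iterator follows the same NotDone/Value/NextWithContext pattern as the iterators in
+// models.go.
+//
+//   client := frontdoor.NewManagedRuleSetsClient("<subscription id>")
+//   client.Authorizer = authorizer
+//   iter, err := client.ListComplete(ctx)
+//   if err != nil {
+//       // handle error
+//   }
+//   for iter.NotDone() {
+//       def := iter.Value()
+//       _ = def
+//       if err := iter.NextWithContext(ctx); err != nil {
+//           // handle paging error
+//           break
+//       }
+//   }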
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/models.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/models.go
new file mode 100644
index 000000000000..ab92d8309169
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/models.go
@@ -0,0 +1,3891 @@
+package frontdoor
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/frontdoor/mgmt/2019-05-01/frontdoor"
+
+// ActionType enumerates the values for action type.
+type ActionType string
+
+const (
+ // Allow ...
+ Allow ActionType = "Allow"
+ // Block ...
+ Block ActionType = "Block"
+ // Log ...
+ Log ActionType = "Log"
+ // Redirect ...
+ Redirect ActionType = "Redirect"
+)
+
+// PossibleActionTypeValues returns an array of possible values for the ActionType const type.
+func PossibleActionTypeValues() []ActionType {
+ return []ActionType{Allow, Block, Log, Redirect}
+}
+
+// Availability enumerates the values for availability.
+type Availability string
+
+const (
+ // Available ...
+ Available Availability = "Available"
+ // Unavailable ...
+ Unavailable Availability = "Unavailable"
+)
+
+// PossibleAvailabilityValues returns an array of possible values for the Availability const type.
+func PossibleAvailabilityValues() []Availability {
+ return []Availability{Available, Unavailable}
+}
+
+// BackendEnabledState enumerates the values for backend enabled state.
+type BackendEnabledState string
+
+const (
+ // Disabled ...
+ Disabled BackendEnabledState = "Disabled"
+ // Enabled ...
+ Enabled BackendEnabledState = "Enabled"
+)
+
+// PossibleBackendEnabledStateValues returns an array of possible values for the BackendEnabledState const type.
+func PossibleBackendEnabledStateValues() []BackendEnabledState {
+ return []BackendEnabledState{Disabled, Enabled}
+}
+
+// CertificateSource enumerates the values for certificate source.
+type CertificateSource string
+
+const (
+ // CertificateSourceAzureKeyVault ...
+ CertificateSourceAzureKeyVault CertificateSource = "AzureKeyVault"
+ // CertificateSourceFrontDoor ...
+ CertificateSourceFrontDoor CertificateSource = "FrontDoor"
+)
+
+// PossibleCertificateSourceValues returns an array of possible values for the CertificateSource const type.
+func PossibleCertificateSourceValues() []CertificateSource {
+ return []CertificateSource{CertificateSourceAzureKeyVault, CertificateSourceFrontDoor}
+}
+
+// CertificateType enumerates the values for certificate type.
+type CertificateType string
+
+const (
+ // Dedicated ...
+ Dedicated CertificateType = "Dedicated"
+)
+
+// PossibleCertificateTypeValues returns an array of possible values for the CertificateType const type.
+func PossibleCertificateTypeValues() []CertificateType {
+ return []CertificateType{Dedicated}
+}
+
+// CustomHTTPSProvisioningState enumerates the values for custom https provisioning state.
+type CustomHTTPSProvisioningState string
+
+const (
+ // CustomHTTPSProvisioningStateDisabled ...
+ CustomHTTPSProvisioningStateDisabled CustomHTTPSProvisioningState = "Disabled"
+ // CustomHTTPSProvisioningStateDisabling ...
+ CustomHTTPSProvisioningStateDisabling CustomHTTPSProvisioningState = "Disabling"
+ // CustomHTTPSProvisioningStateEnabled ...
+ CustomHTTPSProvisioningStateEnabled CustomHTTPSProvisioningState = "Enabled"
+ // CustomHTTPSProvisioningStateEnabling ...
+ CustomHTTPSProvisioningStateEnabling CustomHTTPSProvisioningState = "Enabling"
+ // CustomHTTPSProvisioningStateFailed ...
+ CustomHTTPSProvisioningStateFailed CustomHTTPSProvisioningState = "Failed"
+)
+
+// PossibleCustomHTTPSProvisioningStateValues returns an array of possible values for the CustomHTTPSProvisioningState const type.
+func PossibleCustomHTTPSProvisioningStateValues() []CustomHTTPSProvisioningState {
+ return []CustomHTTPSProvisioningState{CustomHTTPSProvisioningStateDisabled, CustomHTTPSProvisioningStateDisabling, CustomHTTPSProvisioningStateEnabled, CustomHTTPSProvisioningStateEnabling, CustomHTTPSProvisioningStateFailed}
+}
+
+// CustomHTTPSProvisioningSubstate enumerates the values for custom https provisioning substate.
+type CustomHTTPSProvisioningSubstate string
+
+const (
+ // CertificateDeleted ...
+ CertificateDeleted CustomHTTPSProvisioningSubstate = "CertificateDeleted"
+ // CertificateDeployed ...
+ CertificateDeployed CustomHTTPSProvisioningSubstate = "CertificateDeployed"
+ // DeletingCertificate ...
+ DeletingCertificate CustomHTTPSProvisioningSubstate = "DeletingCertificate"
+ // DeployingCertificate ...
+ DeployingCertificate CustomHTTPSProvisioningSubstate = "DeployingCertificate"
+ // DomainControlValidationRequestApproved ...
+ DomainControlValidationRequestApproved CustomHTTPSProvisioningSubstate = "DomainControlValidationRequestApproved"
+ // DomainControlValidationRequestRejected ...
+ DomainControlValidationRequestRejected CustomHTTPSProvisioningSubstate = "DomainControlValidationRequestRejected"
+ // DomainControlValidationRequestTimedOut ...
+ DomainControlValidationRequestTimedOut CustomHTTPSProvisioningSubstate = "DomainControlValidationRequestTimedOut"
+ // IssuingCertificate ...
+ IssuingCertificate CustomHTTPSProvisioningSubstate = "IssuingCertificate"
+ // PendingDomainControlValidationREquestApproval ...
+ PendingDomainControlValidationREquestApproval CustomHTTPSProvisioningSubstate = "PendingDomainControlValidationREquestApproval"
+ // SubmittingDomainControlValidationRequest ...
+ SubmittingDomainControlValidationRequest CustomHTTPSProvisioningSubstate = "SubmittingDomainControlValidationRequest"
+)
+
+// PossibleCustomHTTPSProvisioningSubstateValues returns an array of possible values for the CustomHTTPSProvisioningSubstate const type.
+func PossibleCustomHTTPSProvisioningSubstateValues() []CustomHTTPSProvisioningSubstate {
+ return []CustomHTTPSProvisioningSubstate{CertificateDeleted, CertificateDeployed, DeletingCertificate, DeployingCertificate, DomainControlValidationRequestApproved, DomainControlValidationRequestRejected, DomainControlValidationRequestTimedOut, IssuingCertificate, PendingDomainControlValidationREquestApproval, SubmittingDomainControlValidationRequest}
+}
+
+// CustomRuleEnabledState enumerates the values for custom rule enabled state.
+type CustomRuleEnabledState string
+
+const (
+ // CustomRuleEnabledStateDisabled ...
+ CustomRuleEnabledStateDisabled CustomRuleEnabledState = "Disabled"
+ // CustomRuleEnabledStateEnabled ...
+ CustomRuleEnabledStateEnabled CustomRuleEnabledState = "Enabled"
+)
+
+// PossibleCustomRuleEnabledStateValues returns an array of possible values for the CustomRuleEnabledState const type.
+func PossibleCustomRuleEnabledStateValues() []CustomRuleEnabledState {
+ return []CustomRuleEnabledState{CustomRuleEnabledStateDisabled, CustomRuleEnabledStateEnabled}
+}
+
+// DynamicCompressionEnabled enumerates the values for dynamic compression enabled.
+type DynamicCompressionEnabled string
+
+const (
+ // DynamicCompressionEnabledDisabled ...
+ DynamicCompressionEnabledDisabled DynamicCompressionEnabled = "Disabled"
+ // DynamicCompressionEnabledEnabled ...
+ DynamicCompressionEnabledEnabled DynamicCompressionEnabled = "Enabled"
+)
+
+// PossibleDynamicCompressionEnabledValues returns an array of possible values for the DynamicCompressionEnabled const type.
+func PossibleDynamicCompressionEnabledValues() []DynamicCompressionEnabled {
+ return []DynamicCompressionEnabled{DynamicCompressionEnabledDisabled, DynamicCompressionEnabledEnabled}
+}
+
+// EnabledState enumerates the values for enabled state.
+type EnabledState string
+
+const (
+ // EnabledStateDisabled ...
+ EnabledStateDisabled EnabledState = "Disabled"
+ // EnabledStateEnabled ...
+ EnabledStateEnabled EnabledState = "Enabled"
+)
+
+// PossibleEnabledStateValues returns an array of possible values for the EnabledState const type.
+func PossibleEnabledStateValues() []EnabledState {
+ return []EnabledState{EnabledStateDisabled, EnabledStateEnabled}
+}
+
+// EnforceCertificateNameCheckEnabledState enumerates the values for enforce certificate name check enabled
+// state.
+type EnforceCertificateNameCheckEnabledState string
+
+const (
+ // EnforceCertificateNameCheckEnabledStateDisabled ...
+ EnforceCertificateNameCheckEnabledStateDisabled EnforceCertificateNameCheckEnabledState = "Disabled"
+ // EnforceCertificateNameCheckEnabledStateEnabled ...
+ EnforceCertificateNameCheckEnabledStateEnabled EnforceCertificateNameCheckEnabledState = "Enabled"
+)
+
+// PossibleEnforceCertificateNameCheckEnabledStateValues returns an array of possible values for the EnforceCertificateNameCheckEnabledState const type.
+func PossibleEnforceCertificateNameCheckEnabledStateValues() []EnforceCertificateNameCheckEnabledState {
+ return []EnforceCertificateNameCheckEnabledState{EnforceCertificateNameCheckEnabledStateDisabled, EnforceCertificateNameCheckEnabledStateEnabled}
+}
+
+// ForwardingProtocol enumerates the values for forwarding protocol.
+type ForwardingProtocol string
+
+const (
+ // HTTPOnly ...
+ HTTPOnly ForwardingProtocol = "HttpOnly"
+ // HTTPSOnly ...
+ HTTPSOnly ForwardingProtocol = "HttpsOnly"
+ // MatchRequest ...
+ MatchRequest ForwardingProtocol = "MatchRequest"
+)
+
+// PossibleForwardingProtocolValues returns an array of possible values for the ForwardingProtocol const type.
+func PossibleForwardingProtocolValues() []ForwardingProtocol {
+ return []ForwardingProtocol{HTTPOnly, HTTPSOnly, MatchRequest}
+}
+
+// HealthProbeEnabled enumerates the values for health probe enabled.
+type HealthProbeEnabled string
+
+const (
+ // HealthProbeEnabledDisabled ...
+ HealthProbeEnabledDisabled HealthProbeEnabled = "Disabled"
+ // HealthProbeEnabledEnabled ...
+ HealthProbeEnabledEnabled HealthProbeEnabled = "Enabled"
+)
+
+// PossibleHealthProbeEnabledValues returns an array of possible values for the HealthProbeEnabled const type.
+func PossibleHealthProbeEnabledValues() []HealthProbeEnabled {
+ return []HealthProbeEnabled{HealthProbeEnabledDisabled, HealthProbeEnabledEnabled}
+}
+
+// HealthProbeMethod enumerates the values for health probe method.
+type HealthProbeMethod string
+
+const (
+ // GET ...
+ GET HealthProbeMethod = "GET"
+ // HEAD ...
+ HEAD HealthProbeMethod = "HEAD"
+)
+
+// PossibleHealthProbeMethodValues returns an array of possible values for the HealthProbeMethod const type.
+func PossibleHealthProbeMethodValues() []HealthProbeMethod {
+ return []HealthProbeMethod{GET, HEAD}
+}
+
+// ManagedRuleEnabledState enumerates the values for managed rule enabled state.
+type ManagedRuleEnabledState string
+
+const (
+ // ManagedRuleEnabledStateDisabled ...
+ ManagedRuleEnabledStateDisabled ManagedRuleEnabledState = "Disabled"
+ // ManagedRuleEnabledStateEnabled ...
+ ManagedRuleEnabledStateEnabled ManagedRuleEnabledState = "Enabled"
+)
+
+// PossibleManagedRuleEnabledStateValues returns an array of possible values for the ManagedRuleEnabledState const type.
+func PossibleManagedRuleEnabledStateValues() []ManagedRuleEnabledState {
+ return []ManagedRuleEnabledState{ManagedRuleEnabledStateDisabled, ManagedRuleEnabledStateEnabled}
+}
+
+// MatchVariable enumerates the values for match variable.
+type MatchVariable string
+
+const (
+ // Cookies ...
+ Cookies MatchVariable = "Cookies"
+ // PostArgs ...
+ PostArgs MatchVariable = "PostArgs"
+ // QueryString ...
+ QueryString MatchVariable = "QueryString"
+ // RemoteAddr ...
+ RemoteAddr MatchVariable = "RemoteAddr"
+ // RequestBody ...
+ RequestBody MatchVariable = "RequestBody"
+ // RequestHeader ...
+ RequestHeader MatchVariable = "RequestHeader"
+ // RequestMethod ...
+ RequestMethod MatchVariable = "RequestMethod"
+ // RequestURI ...
+ RequestURI MatchVariable = "RequestUri"
+ // SocketAddr ...
+ SocketAddr MatchVariable = "SocketAddr"
+)
+
+// PossibleMatchVariableValues returns an array of possible values for the MatchVariable const type.
+func PossibleMatchVariableValues() []MatchVariable {
+ return []MatchVariable{Cookies, PostArgs, QueryString, RemoteAddr, RequestBody, RequestHeader, RequestMethod, RequestURI, SocketAddr}
+}
+
+// MinimumTLSVersion enumerates the values for minimum tls version.
+type MinimumTLSVersion string
+
+const (
+ // OneFullStopTwo ...
+ OneFullStopTwo MinimumTLSVersion = "1.2"
+ // OneFullStopZero ...
+ OneFullStopZero MinimumTLSVersion = "1.0"
+)
+
+// PossibleMinimumTLSVersionValues returns an array of possible values for the MinimumTLSVersion const type.
+func PossibleMinimumTLSVersionValues() []MinimumTLSVersion {
+ return []MinimumTLSVersion{OneFullStopTwo, OneFullStopZero}
+}
+
+// NetworkOperationStatus enumerates the values for network operation status.
+type NetworkOperationStatus string
+
+const (
+ // Failed ...
+ Failed NetworkOperationStatus = "Failed"
+ // InProgress ...
+ InProgress NetworkOperationStatus = "InProgress"
+ // Succeeded ...
+ Succeeded NetworkOperationStatus = "Succeeded"
+)
+
+// PossibleNetworkOperationStatusValues returns an array of possible values for the NetworkOperationStatus const type.
+func PossibleNetworkOperationStatusValues() []NetworkOperationStatus {
+ return []NetworkOperationStatus{Failed, InProgress, Succeeded}
+}
+
+// OdataType enumerates the values for odata type.
+type OdataType string
+
+const (
+ // OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorForwardingConfiguration ...
+ OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorForwardingConfiguration OdataType = "#Microsoft.Azure.FrontDoor.Models.FrontdoorForwardingConfiguration"
+ // OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorRedirectConfiguration ...
+ OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorRedirectConfiguration OdataType = "#Microsoft.Azure.FrontDoor.Models.FrontdoorRedirectConfiguration"
+ // OdataTypeRouteConfiguration ...
+ OdataTypeRouteConfiguration OdataType = "RouteConfiguration"
+)
+
+// PossibleOdataTypeValues returns an array of possible values for the OdataType const type.
+func PossibleOdataTypeValues() []OdataType {
+ return []OdataType{OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorForwardingConfiguration, OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorRedirectConfiguration, OdataTypeRouteConfiguration}
+}
+
+// Operator enumerates the values for operator.
+type Operator string
+
+const (
+ // Any ...
+ Any Operator = "Any"
+ // BeginsWith ...
+ BeginsWith Operator = "BeginsWith"
+ // Contains ...
+ Contains Operator = "Contains"
+ // EndsWith ...
+ EndsWith Operator = "EndsWith"
+ // Equal ...
+ Equal Operator = "Equal"
+ // GeoMatch ...
+ GeoMatch Operator = "GeoMatch"
+ // GreaterThan ...
+ GreaterThan Operator = "GreaterThan"
+ // GreaterThanOrEqual ...
+ GreaterThanOrEqual Operator = "GreaterThanOrEqual"
+ // IPMatch ...
+ IPMatch Operator = "IPMatch"
+ // LessThan ...
+ LessThan Operator = "LessThan"
+ // LessThanOrEqual ...
+ LessThanOrEqual Operator = "LessThanOrEqual"
+ // RegEx ...
+ RegEx Operator = "RegEx"
+)
+
+// PossibleOperatorValues returns an array of possible values for the Operator const type.
+func PossibleOperatorValues() []Operator {
+ return []Operator{Any, BeginsWith, Contains, EndsWith, Equal, GeoMatch, GreaterThan, GreaterThanOrEqual, IPMatch, LessThan, LessThanOrEqual, RegEx}
+}
+
+// PolicyEnabledState enumerates the values for policy enabled state.
+type PolicyEnabledState string
+
+const (
+ // PolicyEnabledStateDisabled ...
+ PolicyEnabledStateDisabled PolicyEnabledState = "Disabled"
+ // PolicyEnabledStateEnabled ...
+ PolicyEnabledStateEnabled PolicyEnabledState = "Enabled"
+)
+
+// PossiblePolicyEnabledStateValues returns an array of possible values for the PolicyEnabledState const type.
+func PossiblePolicyEnabledStateValues() []PolicyEnabledState {
+ return []PolicyEnabledState{PolicyEnabledStateDisabled, PolicyEnabledStateEnabled}
+}
+
+// PolicyMode enumerates the values for policy mode.
+type PolicyMode string
+
+const (
+ // Detection ...
+ Detection PolicyMode = "Detection"
+ // Prevention ...
+ Prevention PolicyMode = "Prevention"
+)
+
+// PossiblePolicyModeValues returns an array of possible values for the PolicyMode const type.
+func PossiblePolicyModeValues() []PolicyMode {
+ return []PolicyMode{Detection, Prevention}
+}
+
+// PolicyResourceState enumerates the values for policy resource state.
+type PolicyResourceState string
+
+const (
+ // PolicyResourceStateCreating ...
+ PolicyResourceStateCreating PolicyResourceState = "Creating"
+ // PolicyResourceStateDeleting ...
+ PolicyResourceStateDeleting PolicyResourceState = "Deleting"
+ // PolicyResourceStateDisabled ...
+ PolicyResourceStateDisabled PolicyResourceState = "Disabled"
+ // PolicyResourceStateDisabling ...
+ PolicyResourceStateDisabling PolicyResourceState = "Disabling"
+ // PolicyResourceStateEnabled ...
+ PolicyResourceStateEnabled PolicyResourceState = "Enabled"
+ // PolicyResourceStateEnabling ...
+ PolicyResourceStateEnabling PolicyResourceState = "Enabling"
+)
+
+// PossiblePolicyResourceStateValues returns an array of possible values for the PolicyResourceState const type.
+func PossiblePolicyResourceStateValues() []PolicyResourceState {
+ return []PolicyResourceState{PolicyResourceStateCreating, PolicyResourceStateDeleting, PolicyResourceStateDisabled, PolicyResourceStateDisabling, PolicyResourceStateEnabled, PolicyResourceStateEnabling}
+}
+
+// Protocol enumerates the values for protocol.
+type Protocol string
+
+const (
+ // HTTP ...
+ HTTP Protocol = "Http"
+ // HTTPS ...
+ HTTPS Protocol = "Https"
+)
+
+// PossibleProtocolValues returns an array of possible values for the Protocol const type.
+func PossibleProtocolValues() []Protocol {
+ return []Protocol{HTTP, HTTPS}
+}
+
+// Query enumerates the values for query.
+type Query string
+
+const (
+ // StripAll ...
+ StripAll Query = "StripAll"
+ // StripNone ...
+ StripNone Query = "StripNone"
+)
+
+// PossibleQueryValues returns an array of possible values for the Query const type.
+func PossibleQueryValues() []Query {
+ return []Query{StripAll, StripNone}
+}
+
+// RedirectProtocol enumerates the values for redirect protocol.
+type RedirectProtocol string
+
+const (
+ // RedirectProtocolHTTPOnly ...
+ RedirectProtocolHTTPOnly RedirectProtocol = "HttpOnly"
+ // RedirectProtocolHTTPSOnly ...
+ RedirectProtocolHTTPSOnly RedirectProtocol = "HttpsOnly"
+ // RedirectProtocolMatchRequest ...
+ RedirectProtocolMatchRequest RedirectProtocol = "MatchRequest"
+)
+
+// PossibleRedirectProtocolValues returns an array of possible values for the RedirectProtocol const type.
+func PossibleRedirectProtocolValues() []RedirectProtocol {
+ return []RedirectProtocol{RedirectProtocolHTTPOnly, RedirectProtocolHTTPSOnly, RedirectProtocolMatchRequest}
+}
+
+// RedirectType enumerates the values for redirect type.
+type RedirectType string
+
+const (
+ // Found ...
+ Found RedirectType = "Found"
+ // Moved ...
+ Moved RedirectType = "Moved"
+ // PermanentRedirect ...
+ PermanentRedirect RedirectType = "PermanentRedirect"
+ // TemporaryRedirect ...
+ TemporaryRedirect RedirectType = "TemporaryRedirect"
+)
+
+// PossibleRedirectTypeValues returns an array of possible values for the RedirectType const type.
+func PossibleRedirectTypeValues() []RedirectType {
+ return []RedirectType{Found, Moved, PermanentRedirect, TemporaryRedirect}
+}
+
+// ResourceState enumerates the values for resource state.
+type ResourceState string
+
+const (
+ // ResourceStateCreating ...
+ ResourceStateCreating ResourceState = "Creating"
+ // ResourceStateDeleting ...
+ ResourceStateDeleting ResourceState = "Deleting"
+ // ResourceStateDisabled ...
+ ResourceStateDisabled ResourceState = "Disabled"
+ // ResourceStateDisabling ...
+ ResourceStateDisabling ResourceState = "Disabling"
+ // ResourceStateEnabled ...
+ ResourceStateEnabled ResourceState = "Enabled"
+ // ResourceStateEnabling ...
+ ResourceStateEnabling ResourceState = "Enabling"
+)
+
+// PossibleResourceStateValues returns an array of possible values for the ResourceState const type.
+func PossibleResourceStateValues() []ResourceState {
+ return []ResourceState{ResourceStateCreating, ResourceStateDeleting, ResourceStateDisabled, ResourceStateDisabling, ResourceStateEnabled, ResourceStateEnabling}
+}
+
+// ResourceType enumerates the values for resource type.
+type ResourceType string
+
+const (
+ // MicrosoftNetworkfrontDoors ...
+ MicrosoftNetworkfrontDoors ResourceType = "Microsoft.Network/frontDoors"
+ // MicrosoftNetworkfrontDoorsfrontendEndpoints ...
+ MicrosoftNetworkfrontDoorsfrontendEndpoints ResourceType = "Microsoft.Network/frontDoors/frontendEndpoints"
+)
+
+// PossibleResourceTypeValues returns an array of possible values for the ResourceType const type.
+func PossibleResourceTypeValues() []ResourceType {
+ return []ResourceType{MicrosoftNetworkfrontDoors, MicrosoftNetworkfrontDoorsfrontendEndpoints}
+}
+
+// RoutingRuleEnabledState enumerates the values for routing rule enabled state.
+type RoutingRuleEnabledState string
+
+const (
+ // RoutingRuleEnabledStateDisabled ...
+ RoutingRuleEnabledStateDisabled RoutingRuleEnabledState = "Disabled"
+ // RoutingRuleEnabledStateEnabled ...
+ RoutingRuleEnabledStateEnabled RoutingRuleEnabledState = "Enabled"
+)
+
+// PossibleRoutingRuleEnabledStateValues returns an array of possible values for the RoutingRuleEnabledState const type.
+func PossibleRoutingRuleEnabledStateValues() []RoutingRuleEnabledState {
+ return []RoutingRuleEnabledState{RoutingRuleEnabledStateDisabled, RoutingRuleEnabledStateEnabled}
+}
+
+// RuleType enumerates the values for rule type.
+type RuleType string
+
+const (
+ // MatchRule ...
+ MatchRule RuleType = "MatchRule"
+ // RateLimitRule ...
+ RateLimitRule RuleType = "RateLimitRule"
+)
+
+// PossibleRuleTypeValues returns an array of possible values for the RuleType const type.
+func PossibleRuleTypeValues() []RuleType {
+ return []RuleType{MatchRule, RateLimitRule}
+}
+
+// SessionAffinityEnabledState enumerates the values for session affinity enabled state.
+type SessionAffinityEnabledState string
+
+const (
+ // SessionAffinityEnabledStateDisabled ...
+ SessionAffinityEnabledStateDisabled SessionAffinityEnabledState = "Disabled"
+ // SessionAffinityEnabledStateEnabled ...
+ SessionAffinityEnabledStateEnabled SessionAffinityEnabledState = "Enabled"
+)
+
+// PossibleSessionAffinityEnabledStateValues returns an array of possible values for the SessionAffinityEnabledState const type.
+func PossibleSessionAffinityEnabledStateValues() []SessionAffinityEnabledState {
+ return []SessionAffinityEnabledState{SessionAffinityEnabledStateDisabled, SessionAffinityEnabledStateEnabled}
+}
+
+// TransformType enumerates the values for transform type.
+type TransformType string
+
+const (
+ // Lowercase ...
+ Lowercase TransformType = "Lowercase"
+ // RemoveNulls ...
+ RemoveNulls TransformType = "RemoveNulls"
+ // Trim ...
+ Trim TransformType = "Trim"
+ // Uppercase ...
+ Uppercase TransformType = "Uppercase"
+ // URLDecode ...
+ URLDecode TransformType = "UrlDecode"
+ // URLEncode ...
+ URLEncode TransformType = "UrlEncode"
+)
+
+// PossibleTransformTypeValues returns an array of possible values for the TransformType const type.
+func PossibleTransformTypeValues() []TransformType {
+ return []TransformType{Lowercase, RemoveNulls, Trim, Uppercase, URLDecode, URLEncode}
+}
+
+// AzureAsyncOperationResult the response body contains the status of the specified asynchronous operation,
+// indicating whether it has succeeded, is in progress, or has failed. Note that this status is distinct
+// from the HTTP status code returned for the Get Operation Status operation itself. If the asynchronous
+// operation succeeded, the response body includes the HTTP status code for the successful request. If the
+// asynchronous operation failed, the response body includes the HTTP status code for the failed request
+// and error information regarding the failure.
+type AzureAsyncOperationResult struct {
+ // Status - Status of the Azure async operation. Possible values include: 'InProgress', 'Succeeded', 'Failed'
+ Status NetworkOperationStatus `json:"status,omitempty"`
+ Error *Error `json:"error,omitempty"`
+}
+
+// Backend backend address of a frontDoor load balancer.
+type Backend struct {
+ // Address - Location of the backend (IP address or FQDN)
+ Address *string `json:"address,omitempty"`
+ // HTTPPort - The HTTP TCP port number. Must be between 1 and 65535.
+ HTTPPort *int32 `json:"httpPort,omitempty"`
+ // HTTPSPort - The HTTPS TCP port number. Must be between 1 and 65535.
+ HTTPSPort *int32 `json:"httpsPort,omitempty"`
+ // EnabledState - Whether to enable use of this backend. Permitted values are 'Enabled' or 'Disabled'. Possible values include: 'Enabled', 'Disabled'
+ EnabledState BackendEnabledState `json:"enabledState,omitempty"`
+ // Priority - Priority to use for load balancing. Higher priorities will not be used for load balancing if any lower priority backend is healthy.
+ Priority *int32 `json:"priority,omitempty"`
+ // Weight - Weight of this endpoint for load balancing purposes.
+ Weight *int32 `json:"weight,omitempty"`
+ // BackendHostHeader - The value to use as the host header sent to the backend. If blank or unspecified, this defaults to the incoming host.
+ BackendHostHeader *string `json:"backendHostHeader,omitempty"`
+}
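+
+// A minimal (non-generated) construction sketch for a Backend entry; the address and numeric
+// values are illustrative placeholders, and the pointer helpers come from the imported
+// github.com/Azure/go-autorest/autorest/to package.
+//
+//   backend := frontdoor.Backend{
+//       Address:      to.StringPtr("backend.contoso.example"),
+//       HTTPPort:     to.Int32Ptr(80),
+//       HTTPSPort:    to.Int32Ptr(443),
+//       EnabledState: frontdoor.Enabled,
+//       Priority:     to.Int32Ptr(1),
+//       Weight:       to.Int32Ptr(50),
+//   }
+//   _ = backend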
+
+// BackendPool a backend pool is a collection of backends that can be routed to.
+type BackendPool struct {
+ autorest.Response `json:"-"`
+ // BackendPoolProperties - Properties of the Front Door Backend Pool
+ *BackendPoolProperties `json:"properties,omitempty"`
+ // Name - Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BackendPool.
+func (bp BackendPool) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if bp.BackendPoolProperties != nil {
+ objectMap["properties"] = bp.BackendPoolProperties
+ }
+ if bp.Name != nil {
+ objectMap["name"] = bp.Name
+ }
+ if bp.ID != nil {
+ objectMap["id"] = bp.ID
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for BackendPool struct.
+func (bp *BackendPool) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var backendPoolProperties BackendPoolProperties
+ err = json.Unmarshal(*v, &backendPoolProperties)
+ if err != nil {
+ return err
+ }
+ bp.BackendPoolProperties = &backendPoolProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ bp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ bp.Type = &typeVar
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ bp.ID = &ID
+ }
+ }
+ }
+
+ return nil
+}
+
+// BackendPoolListResult result of the request to list Backend Pools. It contains a list of Backend Pools
+// objects and a URL link to get the next set of results.
+type BackendPoolListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of Backend Pools within a Front Door.
+ Value *[]BackendPool `json:"value,omitempty"`
+ // NextLink - URL to get the next set of BackendPool objects if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// BackendPoolListResultIterator provides access to a complete listing of BackendPool values.
+type BackendPoolListResultIterator struct {
+ i int
+ page BackendPoolListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *BackendPoolListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BackendPoolListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *BackendPoolListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter BackendPoolListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter BackendPoolListResultIterator) Response() BackendPoolListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter BackendPoolListResultIterator) Value() BackendPool {
+ if !iter.page.NotDone() {
+ return BackendPool{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewBackendPoolListResultIterator creates a new instance of the BackendPoolListResultIterator type.
+func NewBackendPoolListResultIterator(page BackendPoolListResultPage) BackendPoolListResultIterator {
+ return BackendPoolListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (bplr BackendPoolListResult) IsEmpty() bool {
+ return bplr.Value == nil || len(*bplr.Value) == 0
+}
+
+// backendPoolListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (bplr BackendPoolListResult) backendPoolListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if bplr.NextLink == nil || len(to.String(bplr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(bplr.NextLink)))
+}
+
+// BackendPoolListResultPage contains a page of BackendPool values.
+type BackendPoolListResultPage struct {
+ fn func(context.Context, BackendPoolListResult) (BackendPoolListResult, error)
+ bplr BackendPoolListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *BackendPoolListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BackendPoolListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.bplr)
+ if err != nil {
+ return err
+ }
+ page.bplr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *BackendPoolListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page BackendPoolListResultPage) NotDone() bool {
+ return !page.bplr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page BackendPoolListResultPage) Response() BackendPoolListResult {
+ return page.bplr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page BackendPoolListResultPage) Values() []BackendPool {
+ if page.bplr.IsEmpty() {
+ return nil
+ }
+ return *page.bplr.Value
+}
+
+// NewBackendPoolListResultPage creates a new instance of the BackendPoolListResultPage type.
+func NewBackendPoolListResultPage(getNextPage func(context.Context, BackendPoolListResult) (BackendPoolListResult, error)) BackendPoolListResultPage {
+ return BackendPoolListResultPage{fn: getNextPage}
+}
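+
+// A minimal (non-generated) paging sketch: a BackendPoolListResultPage, typically returned by a
+// list call on BackendPoolsClient (defined elsewhere in this change), can be walked page by page.
+//
+//   for page.NotDone() {
+//       for _, bp := range page.Values() {
+//           _ = bp // BackendPool
+//       }
+//       if err := page.NextWithContext(ctx); err != nil {
+//           // handle paging error
+//           break
+//       }
+//   }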
+
+// BackendPoolProperties the JSON object that contains the properties required to create a Backend Pool.
+type BackendPoolProperties struct {
+ // ResourceState - Resource status. Possible values include: 'ResourceStateCreating', 'ResourceStateEnabling', 'ResourceStateEnabled', 'ResourceStateDisabling', 'ResourceStateDisabled', 'ResourceStateDeleting'
+ ResourceState ResourceState `json:"resourceState,omitempty"`
+ // Backends - The set of backends for this pool
+ Backends *[]Backend `json:"backends,omitempty"`
+ // LoadBalancingSettings - Load balancing settings for a backend pool
+ LoadBalancingSettings *SubResource `json:"loadBalancingSettings,omitempty"`
+ // HealthProbeSettings - L7 health probe settings for a backend pool
+ HealthProbeSettings *SubResource `json:"healthProbeSettings,omitempty"`
+}
+
+// BackendPoolsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type BackendPoolsCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *BackendPoolsCreateOrUpdateFuture) Result(client BackendPoolsClient) (bp BackendPool, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.BackendPoolsCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if bp.Response.Response, err = future.GetResult(sender); err == nil && bp.Response.Response.StatusCode != http.StatusNoContent {
+ bp, err = client.CreateOrUpdateResponder(bp.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsCreateOrUpdateFuture", "Result", bp.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
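+
+// A minimal (non-generated) sketch of how such a future is typically consumed once it has been
+// returned by BackendPoolsClient.CreateOrUpdate (defined elsewhere in this change); the
+// backendPoolsClient variable is assumed to exist and to be authorized.
+//
+//   if err := future.WaitForCompletionRef(ctx, backendPoolsClient.Client); err != nil {
+//       // handle the polling error
+//   }
+//   bp, err := future.Result(backendPoolsClient)
+//   if err != nil {
+//       // handle the error from the completed operation
+//   }
+//   _ = bp // BackendPool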
+
+// BackendPoolsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type BackendPoolsDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *BackendPoolsDeleteFuture) Result(client BackendPoolsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.BackendPoolsDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.BackendPoolsDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// BackendPoolsSettings settings that apply to all backend pools.
+type BackendPoolsSettings struct {
+ // EnforceCertificateNameCheck - Whether to enforce certificate name check on HTTPS requests to all backend pools. No effect on non-HTTPS requests. Possible values include: 'EnforceCertificateNameCheckEnabledStateEnabled', 'EnforceCertificateNameCheckEnabledStateDisabled'
+ EnforceCertificateNameCheck EnforceCertificateNameCheckEnabledState `json:"enforceCertificateNameCheck,omitempty"`
+ // SendRecvTimeoutSeconds - Send and receive timeout on forwarding request to the backend. When the timeout is reached, the request fails and an error is returned.
+ SendRecvTimeoutSeconds *int32 `json:"sendRecvTimeoutSeconds,omitempty"`
+}
+
+// BackendPoolUpdateParameters a collection of backends that can be routed to.
+type BackendPoolUpdateParameters struct {
+ // Backends - The set of backends for this pool
+ Backends *[]Backend `json:"backends,omitempty"`
+ // LoadBalancingSettings - Load balancing settings for a backend pool
+ LoadBalancingSettings *SubResource `json:"loadBalancingSettings,omitempty"`
+ // HealthProbeSettings - L7 health probe settings for a backend pool
+ HealthProbeSettings *SubResource `json:"healthProbeSettings,omitempty"`
+}
+
+// CacheConfiguration caching settings for a caching-type route. To disable caching, do not provide a
+// cacheConfiguration object.
+type CacheConfiguration struct {
+ // QueryParameterStripDirective - Treatment of URL query terms when forming the cache key. Possible values include: 'StripNone', 'StripAll'
+ QueryParameterStripDirective Query `json:"queryParameterStripDirective,omitempty"`
+ // DynamicCompression - Whether to use dynamic compression for cached content. Possible values include: 'DynamicCompressionEnabledEnabled', 'DynamicCompressionEnabledDisabled'
+ DynamicCompression DynamicCompressionEnabled `json:"dynamicCompression,omitempty"`
+}
+
+// CertificateSourceParameters parameters required for enabling SSL with Front Door-managed certificates
+type CertificateSourceParameters struct {
+ // CertificateType - Defines the type of the certificate used for secure connections to a frontendEndpoint. Possible values include: 'Dedicated'
+ CertificateType CertificateType `json:"certificateType,omitempty"`
+}
+
+// CheckNameAvailabilityInput input of CheckNameAvailability API.
+type CheckNameAvailabilityInput struct {
+ // Name - The resource name to validate.
+ Name *string `json:"name,omitempty"`
+ // Type - The type of the resource whose name is to be validated. Possible values include: 'MicrosoftNetworkfrontDoors', 'MicrosoftNetworkfrontDoorsfrontendEndpoints'
+ Type ResourceType `json:"type,omitempty"`
+}
+
+// CheckNameAvailabilityOutput output of check name availability API.
+type CheckNameAvailabilityOutput struct {
+ autorest.Response `json:"-"`
+ // NameAvailability - READ-ONLY; Indicates whether the name is available. Possible values include: 'Available', 'Unavailable'
+ NameAvailability Availability `json:"nameAvailability,omitempty"`
+ // Reason - READ-ONLY; The reason why the name is not available.
+ Reason *string `json:"reason,omitempty"`
+ // Message - READ-ONLY; The detailed error message describing why the name is not available.
+ Message *string `json:"message,omitempty"`
+}
+
+// CustomHTTPSConfiguration https settings for a domain
+type CustomHTTPSConfiguration struct {
+ // CertificateSource - Defines the source of the SSL certificate. Possible values include: 'CertificateSourceAzureKeyVault', 'CertificateSourceFrontDoor'
+ CertificateSource CertificateSource `json:"certificateSource,omitempty"`
+ // ProtocolType - Defines the TLS extension protocol that is used for secure delivery
+ ProtocolType *string `json:"protocolType,omitempty"`
+ // MinimumTLSVersion - The minimum TLS version required from the clients to establish an SSL handshake with Front Door. Possible values include: 'OneFullStopZero', 'OneFullStopTwo'
+ MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"`
+ // KeyVaultCertificateSourceParameters - KeyVault certificate source parameters (if certificateSource=AzureKeyVault)
+ *KeyVaultCertificateSourceParameters `json:"keyVaultCertificateSourceParameters,omitempty"`
+ // CertificateSourceParameters - Parameters required for enabling SSL with Front Door-managed certificates (if certificateSource=FrontDoor)
+ *CertificateSourceParameters `json:"frontDoorCertificateSourceParameters,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for CustomHTTPSConfiguration.
+func (chc CustomHTTPSConfiguration) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if chc.CertificateSource != "" {
+ objectMap["certificateSource"] = chc.CertificateSource
+ }
+ if chc.ProtocolType != nil {
+ objectMap["protocolType"] = chc.ProtocolType
+ }
+ if chc.MinimumTLSVersion != "" {
+ objectMap["minimumTlsVersion"] = chc.MinimumTLSVersion
+ }
+ if chc.KeyVaultCertificateSourceParameters != nil {
+ objectMap["keyVaultCertificateSourceParameters"] = chc.KeyVaultCertificateSourceParameters
+ }
+ if chc.CertificateSourceParameters != nil {
+ objectMap["frontDoorCertificateSourceParameters"] = chc.CertificateSourceParameters
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for CustomHTTPSConfiguration struct.
+func (chc *CustomHTTPSConfiguration) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "certificateSource":
+ if v != nil {
+ var certificateSource CertificateSource
+ err = json.Unmarshal(*v, &certificateSource)
+ if err != nil {
+ return err
+ }
+ chc.CertificateSource = certificateSource
+ }
+ case "protocolType":
+ if v != nil {
+ var protocolType string
+ err = json.Unmarshal(*v, &protocolType)
+ if err != nil {
+ return err
+ }
+ chc.ProtocolType = &protocolType
+ }
+ case "minimumTlsVersion":
+ if v != nil {
+ var minimumTLSVersion MinimumTLSVersion
+ err = json.Unmarshal(*v, &minimumTLSVersion)
+ if err != nil {
+ return err
+ }
+ chc.MinimumTLSVersion = minimumTLSVersion
+ }
+ case "keyVaultCertificateSourceParameters":
+ if v != nil {
+ var keyVaultCertificateSourceParameters KeyVaultCertificateSourceParameters
+ err = json.Unmarshal(*v, &keyVaultCertificateSourceParameters)
+ if err != nil {
+ return err
+ }
+ chc.KeyVaultCertificateSourceParameters = &keyVaultCertificateSourceParameters
+ }
+ case "frontDoorCertificateSourceParameters":
+ if v != nil {
+ var certificateSourceParameters CertificateSourceParameters
+ err = json.Unmarshal(*v, &certificateSourceParameters)
+ if err != nil {
+ return err
+ }
+ chc.CertificateSourceParameters = &certificateSourceParameters
+ }
+ }
+ }
+
+ return nil
+}
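+
+// exampleCustomHTTPSConfigurationRoundTrip is an editor's illustrative sketch, not generated
+// code. It shows why the custom (un)marshalers above exist: the two certificate-source
+// parameter structs are embedded by pointer and serialized under different JSON keys depending
+// on CertificateSource. The Key Vault resource ID and secret name are hypothetical placeholders.
+func exampleCustomHTTPSConfigurationRoundTrip() (CustomHTTPSConfiguration, error) {
+	in := CustomHTTPSConfiguration{
+		CertificateSource: CertificateSourceAzureKeyVault,
+		MinimumTLSVersion: OneFullStopTwo,
+		KeyVaultCertificateSourceParameters: &KeyVaultCertificateSourceParameters{
+			Vault:      &KeyVaultCertificateSourceParametersVault{ID: to.StringPtr("/subscriptions/hypothetical/resourceGroups/rg/providers/Microsoft.KeyVault/vaults/example-kv")},
+			SecretName: to.StringPtr("example-cert"),
+		},
+	}
+	b, err := json.Marshal(in) // MarshalJSON above emits the keyVaultCertificateSourceParameters key
+	if err != nil {
+		return CustomHTTPSConfiguration{}, err
+	}
+	var out CustomHTTPSConfiguration
+	err = json.Unmarshal(b, &out) // UnmarshalJSON above restores the embedded pointer
+	return out, err
+}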
+
+// CustomRule defines contents of a web application rule
+type CustomRule struct {
+ // Name - Describes the name of the rule.
+ Name *string `json:"name,omitempty"`
+ // Priority - Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value.
+ Priority *int32 `json:"priority,omitempty"`
+ // EnabledState - Describes if the custom rule is in enabled or disabled state. Defaults to Enabled if not specified. Possible values include: 'CustomRuleEnabledStateDisabled', 'CustomRuleEnabledStateEnabled'
+ EnabledState CustomRuleEnabledState `json:"enabledState,omitempty"`
+ // RuleType - Describes type of rule. Possible values include: 'MatchRule', 'RateLimitRule'
+ RuleType RuleType `json:"ruleType,omitempty"`
+ // RateLimitDurationInMinutes - Time window for resetting the rate limit count. Default is 1 minute.
+ RateLimitDurationInMinutes *int32 `json:"rateLimitDurationInMinutes,omitempty"`
+ // RateLimitThreshold - Number of allowed requests per client within the time window.
+ RateLimitThreshold *int32 `json:"rateLimitThreshold,omitempty"`
+ // MatchConditions - List of match conditions.
+ MatchConditions *[]MatchCondition `json:"matchConditions,omitempty"`
+ // Action - Describes what action to be applied when rule matches. Possible values include: 'Allow', 'Block', 'Log', 'Redirect'
+ Action ActionType `json:"action,omitempty"`
+}
+
+// CustomRuleList defines contents of custom rules
+type CustomRuleList struct {
+ // Rules - List of rules
+ Rules *[]CustomRule `json:"rules,omitempty"`
+}
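+
+// exampleRateLimitRule is an editor's illustrative sketch, not generated code: a minimal
+// rate-limit CustomRule wired into a CustomRuleList using the field semantics documented
+// above. Name, priority, threshold and the empty match-condition list are placeholders.
+func exampleRateLimitRule() CustomRuleList {
+	rule := CustomRule{
+		Name:                       to.StringPtr("rateLimitExample"),
+		Priority:                   to.Int32Ptr(1),
+		EnabledState:               CustomRuleEnabledStateEnabled,
+		RuleType:                   RateLimitRule,
+		RateLimitDurationInMinutes: to.Int32Ptr(1),
+		RateLimitThreshold:         to.Int32Ptr(1000),
+		MatchConditions:            &[]MatchCondition{},
+		Action:                     Block,
+	}
+	return CustomRuleList{Rules: &[]CustomRule{rule}}
+}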
+
+// EndpointsPurgeContentFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type EndpointsPurgeContentFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *EndpointsPurgeContentFuture) Result(client EndpointsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.EndpointsPurgeContentFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.EndpointsPurgeContentFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// Error ...
+type Error struct {
+ Code *string `json:"code,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Target *string `json:"target,omitempty"`
+ Details *[]ErrorDetails `json:"details,omitempty"`
+ InnerError *string `json:"innerError,omitempty"`
+}
+
+// ErrorDetails ...
+type ErrorDetails struct {
+ Code *string `json:"code,omitempty"`
+ Target *string `json:"target,omitempty"`
+ Message *string `json:"message,omitempty"`
+}
+
+// ErrorResponse error response indicates Front Door service is not able to process the incoming request.
+// The reason is provided in the error message.
+type ErrorResponse struct {
+ // Code - READ-ONLY; Error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; Error message indicating why the operation failed.
+ Message *string `json:"message,omitempty"`
+}
+
+// ForwardingConfiguration describes Forwarding Route.
+type ForwardingConfiguration struct {
+ // CustomForwardingPath - A custom path used to rewrite resource paths matched by this rule. Leave empty to use incoming path.
+ CustomForwardingPath *string `json:"customForwardingPath,omitempty"`
+ // ForwardingProtocol - Protocol this rule will use when forwarding traffic to backends. Possible values include: 'HTTPOnly', 'HTTPSOnly', 'MatchRequest'
+ ForwardingProtocol ForwardingProtocol `json:"forwardingProtocol,omitempty"`
+ // CacheConfiguration - The caching configuration associated with this rule.
+ CacheConfiguration *CacheConfiguration `json:"cacheConfiguration,omitempty"`
+ // BackendPool - A reference to the BackendPool which this rule routes to.
+ BackendPool *SubResource `json:"backendPool,omitempty"`
+ // OdataType - Possible values include: 'OdataTypeRouteConfiguration', 'OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorForwardingConfiguration', 'OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorRedirectConfiguration'
+ OdataType OdataType `json:"@odata.type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ForwardingConfiguration.
+func (fc ForwardingConfiguration) MarshalJSON() ([]byte, error) {
+ fc.OdataType = OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorForwardingConfiguration
+ objectMap := make(map[string]interface{})
+ if fc.CustomForwardingPath != nil {
+ objectMap["customForwardingPath"] = fc.CustomForwardingPath
+ }
+ if fc.ForwardingProtocol != "" {
+ objectMap["forwardingProtocol"] = fc.ForwardingProtocol
+ }
+ if fc.CacheConfiguration != nil {
+ objectMap["cacheConfiguration"] = fc.CacheConfiguration
+ }
+ if fc.BackendPool != nil {
+ objectMap["backendPool"] = fc.BackendPool
+ }
+ if fc.OdataType != "" {
+ objectMap["@odata.type"] = fc.OdataType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsForwardingConfiguration is the BasicRouteConfiguration implementation for ForwardingConfiguration.
+func (fc ForwardingConfiguration) AsForwardingConfiguration() (*ForwardingConfiguration, bool) {
+ return &fc, true
+}
+
+// AsRedirectConfiguration is the BasicRouteConfiguration implementation for ForwardingConfiguration.
+func (fc ForwardingConfiguration) AsRedirectConfiguration() (*RedirectConfiguration, bool) {
+ return nil, false
+}
+
+// AsRouteConfiguration is the BasicRouteConfiguration implementation for ForwardingConfiguration.
+func (fc ForwardingConfiguration) AsRouteConfiguration() (*RouteConfiguration, bool) {
+ return nil, false
+}
+
+// AsBasicRouteConfiguration is the BasicRouteConfiguration implementation for ForwardingConfiguration.
+func (fc ForwardingConfiguration) AsBasicRouteConfiguration() (BasicRouteConfiguration, bool) {
+ return &fc, true
+}
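+
+// exampleBackendPoolFromRoute is an editor's illustrative sketch, not generated code. It shows
+// how the As* helpers above recover the concrete route type from the BasicRouteConfiguration
+// discriminated interface (defined elsewhere in this package).
+func exampleBackendPoolFromRoute(rc BasicRouteConfiguration) *SubResource {
+	if fc, ok := rc.AsForwardingConfiguration(); ok {
+		return fc.BackendPool
+	}
+	return nil
+}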
+
+// FrontDoor front Door represents a collection of backend endpoints to route traffic to along with rules
+// that specify how traffic is sent there.
+type FrontDoor struct {
+ autorest.Response `json:"-"`
+ // Properties - Properties of the Front Door Load Balancer
+ *Properties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource ID.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for FrontDoor.
+func (fd FrontDoor) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fd.Properties != nil {
+ objectMap["properties"] = fd.Properties
+ }
+ if fd.Location != nil {
+ objectMap["location"] = fd.Location
+ }
+ if fd.Tags != nil {
+ objectMap["tags"] = fd.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FrontDoor struct.
+func (fd *FrontDoor) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var properties Properties
+ err = json.Unmarshal(*v, &properties)
+ if err != nil {
+ return err
+ }
+ fd.Properties = &properties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fd.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fd.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fd.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ fd.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ fd.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
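+
+// exampleFrontDoorMarshal is an editor's illustrative sketch, not generated code. It shows that
+// the custom MarshalJSON above drops the READ-ONLY ID, Name and Type fields, so only properties,
+// location and tags reach the wire on create or update. The values are placeholders.
+func exampleFrontDoorMarshal() ([]byte, error) {
+	fd := FrontDoor{
+		ID:       to.StringPtr("/subscriptions/hypothetical/resourceGroups/rg/providers/Microsoft.Network/frontDoors/example"),
+		Location: to.StringPtr("global"),
+		Tags:     map[string]*string{"env": to.StringPtr("test")},
+	}
+	return json.Marshal(fd) // emits only "location" and "tags"; "id" is omitted by MarshalJSON
+}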
+
+// FrontDoorsCreateOrUpdateFutureType an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type FrontDoorsCreateOrUpdateFutureType struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *FrontDoorsCreateOrUpdateFutureType) Result(client FrontDoorsClient) (fd FrontDoor, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsCreateOrUpdateFutureType", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.FrontDoorsCreateOrUpdateFutureType")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if fd.Response.Response, err = future.GetResult(sender); err == nil && fd.Response.Response.StatusCode != http.StatusNoContent {
+ fd, err = client.CreateOrUpdateResponder(fd.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsCreateOrUpdateFutureType", "Result", fd.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
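+
+// exampleWaitForFrontDoor is an editor's illustrative sketch, not generated code: the usual
+// pattern for blocking on the long-running create-or-update operation above. It assumes
+// azure.Future.WaitForCompletionRef from go-autorest and the FrontDoorsClient type defined
+// elsewhere in this package.
+func exampleWaitForFrontDoor(ctx context.Context, client FrontDoorsClient, future FrontDoorsCreateOrUpdateFutureType) (FrontDoor, error) {
+	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+		return FrontDoor{}, err
+	}
+	return future.Result(client)
+}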
+
+// FrontDoorsDeleteFutureType an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type FrontDoorsDeleteFutureType struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *FrontDoorsDeleteFutureType) Result(client FrontDoorsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontDoorsDeleteFutureType", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.FrontDoorsDeleteFutureType")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// FrontendEndpoint a frontend endpoint used for routing.
+type FrontendEndpoint struct {
+ autorest.Response `json:"-"`
+ // FrontendEndpointProperties - Properties of the Frontend endpoint
+ *FrontendEndpointProperties `json:"properties,omitempty"`
+ // Name - Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FrontendEndpoint.
+func (fe FrontendEndpoint) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fe.FrontendEndpointProperties != nil {
+ objectMap["properties"] = fe.FrontendEndpointProperties
+ }
+ if fe.Name != nil {
+ objectMap["name"] = fe.Name
+ }
+ if fe.ID != nil {
+ objectMap["id"] = fe.ID
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FrontendEndpoint struct.
+func (fe *FrontendEndpoint) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var frontendEndpointProperties FrontendEndpointProperties
+ err = json.Unmarshal(*v, &frontendEndpointProperties)
+ if err != nil {
+ return err
+ }
+ fe.FrontendEndpointProperties = &frontendEndpointProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fe.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fe.Type = &typeVar
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fe.ID = &ID
+ }
+ }
+ }
+
+ return nil
+}
+
+// FrontendEndpointLink defines the Resource ID for a Frontend Endpoint.
+type FrontendEndpointLink struct {
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
+}
+
+// FrontendEndpointProperties the JSON object that contains the properties required to create a frontend
+// endpoint.
+type FrontendEndpointProperties struct {
+ // ResourceState - Resource status. Possible values include: 'ResourceStateCreating', 'ResourceStateEnabling', 'ResourceStateEnabled', 'ResourceStateDisabling', 'ResourceStateDisabled', 'ResourceStateDeleting'
+ ResourceState ResourceState `json:"resourceState,omitempty"`
+ // CustomHTTPSProvisioningState - READ-ONLY; Provisioning status of Custom Https of the frontendEndpoint. Possible values include: 'CustomHTTPSProvisioningStateEnabling', 'CustomHTTPSProvisioningStateEnabled', 'CustomHTTPSProvisioningStateDisabling', 'CustomHTTPSProvisioningStateDisabled', 'CustomHTTPSProvisioningStateFailed'
+ CustomHTTPSProvisioningState CustomHTTPSProvisioningState `json:"customHttpsProvisioningState,omitempty"`
+ // CustomHTTPSProvisioningSubstate - READ-ONLY; Provisioning substate shows the progress of custom HTTPS enabling/disabling process step by step. Possible values include: 'SubmittingDomainControlValidationRequest', 'PendingDomainControlValidationREquestApproval', 'DomainControlValidationRequestApproved', 'DomainControlValidationRequestRejected', 'DomainControlValidationRequestTimedOut', 'IssuingCertificate', 'DeployingCertificate', 'CertificateDeployed', 'DeletingCertificate', 'CertificateDeleted'
+ CustomHTTPSProvisioningSubstate CustomHTTPSProvisioningSubstate `json:"customHttpsProvisioningSubstate,omitempty"`
+ // CustomHTTPSConfiguration - READ-ONLY; The configuration specifying how to enable HTTPS
+ CustomHTTPSConfiguration *CustomHTTPSConfiguration `json:"customHttpsConfiguration,omitempty"`
+ // HostName - The host name of the frontendEndpoint. Must be a domain name.
+ HostName *string `json:"hostName,omitempty"`
+ // SessionAffinityEnabledState - Whether to allow session affinity on this host. Valid options are 'Enabled' or 'Disabled'. Possible values include: 'SessionAffinityEnabledStateEnabled', 'SessionAffinityEnabledStateDisabled'
+ SessionAffinityEnabledState SessionAffinityEnabledState `json:"sessionAffinityEnabledState,omitempty"`
+ // SessionAffinityTTLSeconds - UNUSED. This field will be ignored. The TTL to use in seconds for session affinity, if applicable.
+ SessionAffinityTTLSeconds *int32 `json:"sessionAffinityTtlSeconds,omitempty"`
+ // WebApplicationFirewallPolicyLink - Defines the Web Application Firewall policy for each host (if applicable)
+ WebApplicationFirewallPolicyLink *FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLink `json:"webApplicationFirewallPolicyLink,omitempty"`
+}
+
+// FrontendEndpointsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type FrontendEndpointsCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *FrontendEndpointsCreateOrUpdateFuture) Result(client FrontendEndpointsClient) (fe FrontendEndpoint, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.FrontendEndpointsCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if fe.Response.Response, err = future.GetResult(sender); err == nil && fe.Response.Response.StatusCode != http.StatusNoContent {
+ fe, err = client.CreateOrUpdateResponder(fe.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsCreateOrUpdateFuture", "Result", fe.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// FrontendEndpointsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type FrontendEndpointsDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *FrontendEndpointsDeleteFuture) Result(client FrontendEndpointsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.FrontendEndpointsDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// FrontendEndpointsDisableHTTPSFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type FrontendEndpointsDisableHTTPSFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *FrontendEndpointsDisableHTTPSFuture) Result(client FrontendEndpointsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsDisableHTTPSFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.FrontendEndpointsDisableHTTPSFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// FrontendEndpointsEnableHTTPSFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type FrontendEndpointsEnableHTTPSFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *FrontendEndpointsEnableHTTPSFuture) Result(client FrontendEndpointsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.FrontendEndpointsEnableHTTPSFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.FrontendEndpointsEnableHTTPSFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// FrontendEndpointsListResult result of the request to list frontend endpoints. It contains a list of
+// Frontend endpoint objects and a URL link to get the next set of results.
+type FrontendEndpointsListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of Frontend endpoints within a Front Door.
+ Value *[]FrontendEndpoint `json:"value,omitempty"`
+ // NextLink - URL to get the next set of frontend endpoints if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// FrontendEndpointsListResultIterator provides access to a complete listing of FrontendEndpoint values.
+type FrontendEndpointsListResultIterator struct {
+ i int
+ page FrontendEndpointsListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *FrontendEndpointsListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontendEndpointsListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *FrontendEndpointsListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter FrontendEndpointsListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter FrontendEndpointsListResultIterator) Response() FrontendEndpointsListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter FrontendEndpointsListResultIterator) Value() FrontendEndpoint {
+ if !iter.page.NotDone() {
+ return FrontendEndpoint{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the FrontendEndpointsListResultIterator type.
+func NewFrontendEndpointsListResultIterator(page FrontendEndpointsListResultPage) FrontendEndpointsListResultIterator {
+ return FrontendEndpointsListResultIterator{page: page}
+}
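+
+// exampleListFrontendHosts is an editor's illustrative sketch, not generated code: the
+// canonical pattern for draining a FrontendEndpointsListResultIterator, which would normally
+// be obtained from a FrontendEndpointsClient list call (assumed, not shown in this file).
+func exampleListFrontendHosts(ctx context.Context, iter FrontendEndpointsListResultIterator) ([]string, error) {
+	var hosts []string
+	for iter.NotDone() {
+		fe := iter.Value()
+		if fe.FrontendEndpointProperties != nil && fe.FrontendEndpointProperties.HostName != nil {
+			hosts = append(hosts, *fe.FrontendEndpointProperties.HostName)
+		}
+		if err := iter.NextWithContext(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return hosts, nil
+}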
+
+// IsEmpty returns true if the ListResult contains no values.
+func (felr FrontendEndpointsListResult) IsEmpty() bool {
+ return felr.Value == nil || len(*felr.Value) == 0
+}
+
+// frontendEndpointsListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (felr FrontendEndpointsListResult) frontendEndpointsListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if felr.NextLink == nil || len(to.String(felr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(felr.NextLink)))
+}
+
+// FrontendEndpointsListResultPage contains a page of FrontendEndpoint values.
+type FrontendEndpointsListResultPage struct {
+ fn func(context.Context, FrontendEndpointsListResult) (FrontendEndpointsListResult, error)
+ felr FrontendEndpointsListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *FrontendEndpointsListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FrontendEndpointsListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.felr)
+ if err != nil {
+ return err
+ }
+ page.felr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *FrontendEndpointsListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page FrontendEndpointsListResultPage) NotDone() bool {
+ return !page.felr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page FrontendEndpointsListResultPage) Response() FrontendEndpointsListResult {
+ return page.felr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page FrontendEndpointsListResultPage) Values() []FrontendEndpoint {
+ if page.felr.IsEmpty() {
+ return nil
+ }
+ return *page.felr.Value
+}
+
+// Creates a new instance of the FrontendEndpointsListResultPage type.
+func NewFrontendEndpointsListResultPage(getNextPage func(context.Context, FrontendEndpointsListResult) (FrontendEndpointsListResult, error)) FrontendEndpointsListResultPage {
+ return FrontendEndpointsListResultPage{fn: getNextPage}
+}
+
+// FrontendEndpointUpdateParameters frontend endpoint used in routing rule
+type FrontendEndpointUpdateParameters struct {
+ // HostName - The host name of the frontendEndpoint. Must be a domain name.
+ HostName *string `json:"hostName,omitempty"`
+ // SessionAffinityEnabledState - Whether to allow session affinity on this host. Valid options are 'Enabled' or 'Disabled'. Possible values include: 'SessionAffinityEnabledStateEnabled', 'SessionAffinityEnabledStateDisabled'
+ SessionAffinityEnabledState SessionAffinityEnabledState `json:"sessionAffinityEnabledState,omitempty"`
+ // SessionAffinityTTLSeconds - UNUSED. This field will be ignored. The TTL to use in seconds for session affinity, if applicable.
+ SessionAffinityTTLSeconds *int32 `json:"sessionAffinityTtlSeconds,omitempty"`
+ // WebApplicationFirewallPolicyLink - Defines the Web Application Firewall policy for each host (if applicable)
+ WebApplicationFirewallPolicyLink *FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLink `json:"webApplicationFirewallPolicyLink,omitempty"`
+}
+
+// FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLink defines the Web Application Firewall
+// policy for each host (if applicable)
+type FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLink struct {
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
+}
+
+// HealthProbeSettingsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type HealthProbeSettingsCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *HealthProbeSettingsCreateOrUpdateFuture) Result(client HealthProbeSettingsClient) (hpsm HealthProbeSettingsModel, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.HealthProbeSettingsCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if hpsm.Response.Response, err = future.GetResult(sender); err == nil && hpsm.Response.Response.StatusCode != http.StatusNoContent {
+ hpsm, err = client.CreateOrUpdateResponder(hpsm.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsCreateOrUpdateFuture", "Result", hpsm.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// HealthProbeSettingsDeleteFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type HealthProbeSettingsDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *HealthProbeSettingsDeleteFuture) Result(client HealthProbeSettingsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.HealthProbeSettingsDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.HealthProbeSettingsDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// HealthProbeSettingsListResult result of the request to list HealthProbeSettings. It contains a list of
+// HealthProbeSettings objects and a URL link to get the next set of results.
+type HealthProbeSettingsListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of HealthProbeSettings within a Front Door.
+ Value *[]HealthProbeSettingsModel `json:"value,omitempty"`
+ // NextLink - URL to get the next set of HealthProbeSettings objects if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// HealthProbeSettingsListResultIterator provides access to a complete listing of HealthProbeSettingsModel
+// values.
+type HealthProbeSettingsListResultIterator struct {
+ i int
+ page HealthProbeSettingsListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *HealthProbeSettingsListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/HealthProbeSettingsListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *HealthProbeSettingsListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter HealthProbeSettingsListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter HealthProbeSettingsListResultIterator) Response() HealthProbeSettingsListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter HealthProbeSettingsListResultIterator) Value() HealthProbeSettingsModel {
+ if !iter.page.NotDone() {
+ return HealthProbeSettingsModel{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the HealthProbeSettingsListResultIterator type.
+func NewHealthProbeSettingsListResultIterator(page HealthProbeSettingsListResultPage) HealthProbeSettingsListResultIterator {
+ return HealthProbeSettingsListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (hpslr HealthProbeSettingsListResult) IsEmpty() bool {
+ return hpslr.Value == nil || len(*hpslr.Value) == 0
+}
+
+// healthProbeSettingsListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (hpslr HealthProbeSettingsListResult) healthProbeSettingsListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if hpslr.NextLink == nil || len(to.String(hpslr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(hpslr.NextLink)))
+}
+
+// HealthProbeSettingsListResultPage contains a page of HealthProbeSettingsModel values.
+type HealthProbeSettingsListResultPage struct {
+ fn func(context.Context, HealthProbeSettingsListResult) (HealthProbeSettingsListResult, error)
+ hpslr HealthProbeSettingsListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *HealthProbeSettingsListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/HealthProbeSettingsListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.hpslr)
+ if err != nil {
+ return err
+ }
+ page.hpslr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *HealthProbeSettingsListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page HealthProbeSettingsListResultPage) NotDone() bool {
+ return !page.hpslr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page HealthProbeSettingsListResultPage) Response() HealthProbeSettingsListResult {
+ return page.hpslr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page HealthProbeSettingsListResultPage) Values() []HealthProbeSettingsModel {
+ if page.hpslr.IsEmpty() {
+ return nil
+ }
+ return *page.hpslr.Value
+}
+
+// Creates a new instance of the HealthProbeSettingsListResultPage type.
+func NewHealthProbeSettingsListResultPage(getNextPage func(context.Context, HealthProbeSettingsListResult) (HealthProbeSettingsListResult, error)) HealthProbeSettingsListResultPage {
+ return HealthProbeSettingsListResultPage{fn: getNextPage}
+}
+
+// HealthProbeSettingsModel health probe settings for a backend pool
+type HealthProbeSettingsModel struct {
+ autorest.Response `json:"-"`
+ // HealthProbeSettingsProperties - Properties of the health probe settings
+ *HealthProbeSettingsProperties `json:"properties,omitempty"`
+ // Name - Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for HealthProbeSettingsModel.
+func (hpsm HealthProbeSettingsModel) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if hpsm.HealthProbeSettingsProperties != nil {
+ objectMap["properties"] = hpsm.HealthProbeSettingsProperties
+ }
+ if hpsm.Name != nil {
+ objectMap["name"] = hpsm.Name
+ }
+ if hpsm.ID != nil {
+ objectMap["id"] = hpsm.ID
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for HealthProbeSettingsModel struct.
+func (hpsm *HealthProbeSettingsModel) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var healthProbeSettingsProperties HealthProbeSettingsProperties
+ err = json.Unmarshal(*v, &healthProbeSettingsProperties)
+ if err != nil {
+ return err
+ }
+ hpsm.HealthProbeSettingsProperties = &healthProbeSettingsProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ hpsm.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ hpsm.Type = &typeVar
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ hpsm.ID = &ID
+ }
+ }
+ }
+
+ return nil
+}
+
+// HealthProbeSettingsProperties the JSON object that contains the properties required to create health
+// probe settings.
+type HealthProbeSettingsProperties struct {
+ // ResourceState - Resource status. Possible values include: 'ResourceStateCreating', 'ResourceStateEnabling', 'ResourceStateEnabled', 'ResourceStateDisabling', 'ResourceStateDisabled', 'ResourceStateDeleting'
+ ResourceState ResourceState `json:"resourceState,omitempty"`
+ // Path - The path to use for the health probe. Default is /
+ Path *string `json:"path,omitempty"`
+ // Protocol - Protocol scheme to use for this probe. Possible values include: 'HTTP', 'HTTPS'
+ Protocol Protocol `json:"protocol,omitempty"`
+ // IntervalInSeconds - The number of seconds between health probes.
+ IntervalInSeconds *int32 `json:"intervalInSeconds,omitempty"`
+ // HealthProbeMethod - Configures which HTTP method to use to probe the backends defined under backendPools. Possible values include: 'GET', 'HEAD'
+ HealthProbeMethod HealthProbeMethod `json:"healthProbeMethod,omitempty"`
+ // EnabledState - Whether to enable health probes to be made against backends defined under backendPools. Health probes can only be disabled if there is a single enabled backend in single enabled backend pool. Possible values include: 'HealthProbeEnabledEnabled', 'HealthProbeEnabledDisabled'
+ EnabledState HealthProbeEnabled `json:"enabledState,omitempty"`
+}
+
+// HealthProbeSettingsUpdateParameters L7 health probe settings for a backend pool
+type HealthProbeSettingsUpdateParameters struct {
+ // Path - The path to use for the health probe. Default is /
+ Path *string `json:"path,omitempty"`
+ // Protocol - Protocol scheme to use for this probe. Possible values include: 'HTTP', 'HTTPS'
+ Protocol Protocol `json:"protocol,omitempty"`
+ // IntervalInSeconds - The number of seconds between health probes.
+ IntervalInSeconds *int32 `json:"intervalInSeconds,omitempty"`
+ // HealthProbeMethod - Configures which HTTP method to use to probe the backends defined under backendPools. Possible values include: 'GET', 'HEAD'
+ HealthProbeMethod HealthProbeMethod `json:"healthProbeMethod,omitempty"`
+ // EnabledState - Whether to enable health probes to be made against backends defined under backendPools. Health probes can only be disabled if there is a single enabled backend in single enabled backend pool. Possible values include: 'HealthProbeEnabledEnabled', 'HealthProbeEnabledDisabled'
+ EnabledState HealthProbeEnabled `json:"enabledState,omitempty"`
+}
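+
+// exampleHealthProbeSettings is an editor's illustrative sketch, not generated code: update
+// parameters for an HTTPS HEAD probe against the root path every 30 seconds (all values are
+// arbitrary placeholders).
+func exampleHealthProbeSettings() HealthProbeSettingsUpdateParameters {
+	return HealthProbeSettingsUpdateParameters{
+		Path:              to.StringPtr("/"),
+		Protocol:          HTTPS,
+		IntervalInSeconds: to.Int32Ptr(30),
+		HealthProbeMethod: HEAD,
+		EnabledState:      HealthProbeEnabledEnabled,
+	}
+}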
+
+// KeyVaultCertificateSourceParameters parameters required for bring-your-own-certification via Key Vault
+type KeyVaultCertificateSourceParameters struct {
+ // Vault - The Key Vault containing the SSL certificate
+ Vault *KeyVaultCertificateSourceParametersVault `json:"vault,omitempty"`
+ // SecretName - The name of the Key Vault secret representing the full certificate PFX
+ SecretName *string `json:"secretName,omitempty"`
+ // SecretVersion - The version of the Key Vault secret representing the full certificate PFX
+ SecretVersion *string `json:"secretVersion,omitempty"`
+}
+
+// KeyVaultCertificateSourceParametersVault the Key Vault containing the SSL certificate
+type KeyVaultCertificateSourceParametersVault struct {
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
+}
+
+// ListResult result of the request to list Front Doors. It contains a list of Front Door objects and a URL
+// link to get the next set of results.
+type ListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of Front Doors within a resource group.
+ Value *[]FrontDoor `json:"value,omitempty"`
+ // NextLink - URL to get the next set of Front Door objects if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ListResultIterator provides access to a complete listing of FrontDoor values.
+type ListResultIterator struct {
+ i int
+ page ListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListResultIterator) Response() ListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListResultIterator) Value() FrontDoor {
+ if !iter.page.NotDone() {
+ return FrontDoor{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ListResultIterator type.
+func NewListResultIterator(page ListResultPage) ListResultIterator {
+ return ListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (lr ListResult) IsEmpty() bool {
+ return lr.Value == nil || len(*lr.Value) == 0
+}
+
+// listResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lr ListResult) listResultPreparer(ctx context.Context) (*http.Request, error) {
+ if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lr.NextLink)))
+}
+
+// ListResultPage contains a page of FrontDoor values.
+type ListResultPage struct {
+ fn func(context.Context, ListResult) (ListResult, error)
+ lr ListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.lr)
+ if err != nil {
+ return err
+ }
+ page.lr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListResultPage) NotDone() bool {
+ return !page.lr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListResultPage) Response() ListResult {
+ return page.lr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListResultPage) Values() []FrontDoor {
+ if page.lr.IsEmpty() {
+ return nil
+ }
+ return *page.lr.Value
+}
+
+// Creates a new instance of the ListResultPage type.
+func NewListResultPage(getNextPage func(context.Context, ListResult) (ListResult, error)) ListResultPage {
+ return ListResultPage{fn: getNextPage}
+}
+
+// LoadBalancingSettingsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type LoadBalancingSettingsCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *LoadBalancingSettingsCreateOrUpdateFuture) Result(client LoadBalancingSettingsClient) (lbsm LoadBalancingSettingsModel, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.LoadBalancingSettingsCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if lbsm.Response.Response, err = future.GetResult(sender); err == nil && lbsm.Response.Response.StatusCode != http.StatusNoContent {
+ lbsm, err = client.CreateOrUpdateResponder(lbsm.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsCreateOrUpdateFuture", "Result", lbsm.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// LoadBalancingSettingsDeleteFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type LoadBalancingSettingsDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *LoadBalancingSettingsDeleteFuture) Result(client LoadBalancingSettingsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.LoadBalancingSettingsDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.LoadBalancingSettingsDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// LoadBalancingSettingsListResult result of the request to list load balancing settings. It contains a
+// list of load balancing settings objects and a URL link to get the next set of results.
+type LoadBalancingSettingsListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of load balancing settings within a Front Door.
+ Value *[]LoadBalancingSettingsModel `json:"value,omitempty"`
+ // NextLink - URL to get the next set of LoadBalancingSettings objects if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// LoadBalancingSettingsListResultIterator provides access to a complete listing of
+// LoadBalancingSettingsModel values.
+type LoadBalancingSettingsListResultIterator struct {
+ i int
+ page LoadBalancingSettingsListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *LoadBalancingSettingsListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancingSettingsListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *LoadBalancingSettingsListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter LoadBalancingSettingsListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter LoadBalancingSettingsListResultIterator) Response() LoadBalancingSettingsListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter LoadBalancingSettingsListResultIterator) Value() LoadBalancingSettingsModel {
+ if !iter.page.NotDone() {
+ return LoadBalancingSettingsModel{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the LoadBalancingSettingsListResultIterator type.
+func NewLoadBalancingSettingsListResultIterator(page LoadBalancingSettingsListResultPage) LoadBalancingSettingsListResultIterator {
+ return LoadBalancingSettingsListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (lbslr LoadBalancingSettingsListResult) IsEmpty() bool {
+ return lbslr.Value == nil || len(*lbslr.Value) == 0
+}
+
+// loadBalancingSettingsListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lbslr LoadBalancingSettingsListResult) loadBalancingSettingsListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if lbslr.NextLink == nil || len(to.String(lbslr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lbslr.NextLink)))
+}
+
+// LoadBalancingSettingsListResultPage contains a page of LoadBalancingSettingsModel values.
+type LoadBalancingSettingsListResultPage struct {
+ fn func(context.Context, LoadBalancingSettingsListResult) (LoadBalancingSettingsListResult, error)
+ lbslr LoadBalancingSettingsListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *LoadBalancingSettingsListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancingSettingsListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.lbslr)
+ if err != nil {
+ return err
+ }
+ page.lbslr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *LoadBalancingSettingsListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page LoadBalancingSettingsListResultPage) NotDone() bool {
+ return !page.lbslr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page LoadBalancingSettingsListResultPage) Response() LoadBalancingSettingsListResult {
+ return page.lbslr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page LoadBalancingSettingsListResultPage) Values() []LoadBalancingSettingsModel {
+ if page.lbslr.IsEmpty() {
+ return nil
+ }
+ return *page.lbslr.Value
+}
+
+// Creates a new instance of the LoadBalancingSettingsListResultPage type.
+func NewLoadBalancingSettingsListResultPage(getNextPage func(context.Context, LoadBalancingSettingsListResult) (LoadBalancingSettingsListResult, error)) LoadBalancingSettingsListResultPage {
+ return LoadBalancingSettingsListResultPage{fn: getNextPage}
+}
+
+// LoadBalancingSettingsModel load balancing settings for a backend pool
+type LoadBalancingSettingsModel struct {
+ autorest.Response `json:"-"`
+ // LoadBalancingSettingsProperties - Properties of the load balancing settings
+ *LoadBalancingSettingsProperties `json:"properties,omitempty"`
+ // Name - Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for LoadBalancingSettingsModel.
+func (lbsm LoadBalancingSettingsModel) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if lbsm.LoadBalancingSettingsProperties != nil {
+ objectMap["properties"] = lbsm.LoadBalancingSettingsProperties
+ }
+ if lbsm.Name != nil {
+ objectMap["name"] = lbsm.Name
+ }
+ if lbsm.ID != nil {
+ objectMap["id"] = lbsm.ID
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for LoadBalancingSettingsModel struct.
+func (lbsm *LoadBalancingSettingsModel) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var loadBalancingSettingsProperties LoadBalancingSettingsProperties
+ err = json.Unmarshal(*v, &loadBalancingSettingsProperties)
+ if err != nil {
+ return err
+ }
+ lbsm.LoadBalancingSettingsProperties = &loadBalancingSettingsProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ lbsm.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ lbsm.Type = &typeVar
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ lbsm.ID = &ID
+ }
+ }
+ }
+
+ return nil
+}
+
+// LoadBalancingSettingsProperties the JSON object that contains the properties required to create load
+// balancing settings
+type LoadBalancingSettingsProperties struct {
+ // ResourceState - Resource status. Possible values include: 'ResourceStateCreating', 'ResourceStateEnabling', 'ResourceStateEnabled', 'ResourceStateDisabling', 'ResourceStateDisabled', 'ResourceStateDeleting'
+ ResourceState ResourceState `json:"resourceState,omitempty"`
+ // SampleSize - The number of samples to consider for load balancing decisions
+ SampleSize *int32 `json:"sampleSize,omitempty"`
+ // SuccessfulSamplesRequired - The number of samples within the sample period that must succeed
+ SuccessfulSamplesRequired *int32 `json:"successfulSamplesRequired,omitempty"`
+ // AdditionalLatencyMilliseconds - The additional latency in milliseconds for probes to fall into the lowest latency bucket
+ AdditionalLatencyMilliseconds *int32 `json:"additionalLatencyMilliseconds,omitempty"`
+}
+
+// LoadBalancingSettingsUpdateParameters round-robin load balancing settings for a backend pool
+type LoadBalancingSettingsUpdateParameters struct {
+ // SampleSize - The number of samples to consider for load balancing decisions
+ SampleSize *int32 `json:"sampleSize,omitempty"`
+ // SuccessfulSamplesRequired - The number of samples within the sample period that must succeed
+ SuccessfulSamplesRequired *int32 `json:"successfulSamplesRequired,omitempty"`
+ // AdditionalLatencyMilliseconds - The additional latency in milliseconds for probes to fall into the lowest latency bucket
+ AdditionalLatencyMilliseconds *int32 `json:"additionalLatencyMilliseconds,omitempty"`
+}
+
+// ManagedRuleDefinition describes a managed rule definition.
+type ManagedRuleDefinition struct {
+ // RuleID - READ-ONLY; Identifier for the managed rule.
+ RuleID *string `json:"ruleId,omitempty"`
+ // Description - READ-ONLY; Describes the functionality of the managed rule.
+ Description *string `json:"description,omitempty"`
+}
+
+// ManagedRuleGroupDefinition describes a managed rule group.
+type ManagedRuleGroupDefinition struct {
+ // RuleGroupName - READ-ONLY; Name of the managed rule group.
+ RuleGroupName *string `json:"ruleGroupName,omitempty"`
+ // Description - READ-ONLY; Description of the managed rule group.
+ Description *string `json:"description,omitempty"`
+ // Rules - READ-ONLY; List of rules within the managed rule group.
+ Rules *[]ManagedRuleDefinition `json:"rules,omitempty"`
+}
+
+// ManagedRuleGroupOverride defines a managed rule group override setting.
+type ManagedRuleGroupOverride struct {
+ // RuleGroupName - Describes the managed rule group to override.
+ RuleGroupName *string `json:"ruleGroupName,omitempty"`
+ // Rules - List of rules that will be disabled. If none specified, all rules in the group will be disabled.
+ Rules *[]ManagedRuleOverride `json:"rules,omitempty"`
+}
+
+// ManagedRuleOverride defines a managed rule group override setting.
+type ManagedRuleOverride struct {
+ // RuleID - Identifier for the managed rule.
+ RuleID *string `json:"ruleId,omitempty"`
+ // EnabledState - Describes if the managed rule is in enabled or disabled state. Defaults to Disabled if not specified. Possible values include: 'ManagedRuleEnabledStateDisabled', 'ManagedRuleEnabledStateEnabled'
+ EnabledState ManagedRuleEnabledState `json:"enabledState,omitempty"`
+ // Action - Describes the override action to be applied when rule matches. Possible values include: 'Allow', 'Block', 'Log', 'Redirect'
+ Action ActionType `json:"action,omitempty"`
+}
+
+// ManagedRuleSet defines a managed rule set.
+type ManagedRuleSet struct {
+ // RuleSetType - Defines the rule set type to use.
+ RuleSetType *string `json:"ruleSetType,omitempty"`
+ // RuleSetVersion - Defines the version of the rule set to use.
+ RuleSetVersion *string `json:"ruleSetVersion,omitempty"`
+ // RuleGroupOverrides - Defines the rule group overrides to apply to the rule set.
+ RuleGroupOverrides *[]ManagedRuleGroupOverride `json:"ruleGroupOverrides,omitempty"`
+}
+
+// ManagedRuleSetDefinition describes a managed rule set definition.
+type ManagedRuleSetDefinition struct {
+ // ManagedRuleSetDefinitionProperties - Properties for a managed rule set definition.
+ *ManagedRuleSetDefinitionProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource ID.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for ManagedRuleSetDefinition.
+func (mrsd ManagedRuleSetDefinition) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mrsd.ManagedRuleSetDefinitionProperties != nil {
+ objectMap["properties"] = mrsd.ManagedRuleSetDefinitionProperties
+ }
+ if mrsd.Location != nil {
+ objectMap["location"] = mrsd.Location
+ }
+ if mrsd.Tags != nil {
+ objectMap["tags"] = mrsd.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ManagedRuleSetDefinition struct.
+func (mrsd *ManagedRuleSetDefinition) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var managedRuleSetDefinitionProperties ManagedRuleSetDefinitionProperties
+ err = json.Unmarshal(*v, &managedRuleSetDefinitionProperties)
+ if err != nil {
+ return err
+ }
+ mrsd.ManagedRuleSetDefinitionProperties = &managedRuleSetDefinitionProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ mrsd.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mrsd.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ mrsd.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ mrsd.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ mrsd.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// ManagedRuleSetDefinitionList list of managed rule set definitions available for use in a policy.
+type ManagedRuleSetDefinitionList struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of managed rule set definitions.
+ Value *[]ManagedRuleSetDefinition `json:"value,omitempty"`
+ // NextLink - URL to retrieve next set of managed rule set definitions.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ManagedRuleSetDefinitionListIterator provides access to a complete listing of ManagedRuleSetDefinition
+// values.
+type ManagedRuleSetDefinitionListIterator struct {
+ i int
+ page ManagedRuleSetDefinitionListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ManagedRuleSetDefinitionListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedRuleSetDefinitionListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ManagedRuleSetDefinitionListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ManagedRuleSetDefinitionListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ManagedRuleSetDefinitionListIterator) Response() ManagedRuleSetDefinitionList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ManagedRuleSetDefinitionListIterator) Value() ManagedRuleSetDefinition {
+ if !iter.page.NotDone() {
+ return ManagedRuleSetDefinition{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ManagedRuleSetDefinitionListIterator type.
+func NewManagedRuleSetDefinitionListIterator(page ManagedRuleSetDefinitionListPage) ManagedRuleSetDefinitionListIterator {
+ return ManagedRuleSetDefinitionListIterator{page: page}
+}
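+
+// Example (illustrative sketch, not part of the generated code): consuming a complete
+// listing of ManagedRuleSetDefinition values through the iterator. The initial page,
+// ctx, and the fmt/to imports are assumed to exist in the caller and are not shown here.
+//
+//	iter := frontdoor.NewManagedRuleSetDefinitionListIterator(page)
+//	for iter.NotDone() {
+//		def := iter.Value()
+//		fmt.Println(to.String(def.Name))
+//		if err := iter.NextWithContext(ctx); err != nil {
+//			return err
+//		}
+//	}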
+
+// IsEmpty returns true if the ListResult contains no values.
+func (mrsdl ManagedRuleSetDefinitionList) IsEmpty() bool {
+ return mrsdl.Value == nil || len(*mrsdl.Value) == 0
+}
+
+// managedRuleSetDefinitionListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (mrsdl ManagedRuleSetDefinitionList) managedRuleSetDefinitionListPreparer(ctx context.Context) (*http.Request, error) {
+ if mrsdl.NextLink == nil || len(to.String(mrsdl.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(mrsdl.NextLink)))
+}
+
+// ManagedRuleSetDefinitionListPage contains a page of ManagedRuleSetDefinition values.
+type ManagedRuleSetDefinitionListPage struct {
+ fn func(context.Context, ManagedRuleSetDefinitionList) (ManagedRuleSetDefinitionList, error)
+ mrsdl ManagedRuleSetDefinitionList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ManagedRuleSetDefinitionListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedRuleSetDefinitionListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.mrsdl)
+ if err != nil {
+ return err
+ }
+ page.mrsdl = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ManagedRuleSetDefinitionListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ManagedRuleSetDefinitionListPage) NotDone() bool {
+ return !page.mrsdl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ManagedRuleSetDefinitionListPage) Response() ManagedRuleSetDefinitionList {
+ return page.mrsdl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ManagedRuleSetDefinitionListPage) Values() []ManagedRuleSetDefinition {
+ if page.mrsdl.IsEmpty() {
+ return nil
+ }
+ return *page.mrsdl.Value
+}
+
+// Creates a new instance of the ManagedRuleSetDefinitionListPage type.
+func NewManagedRuleSetDefinitionListPage(getNextPage func(context.Context, ManagedRuleSetDefinitionList) (ManagedRuleSetDefinitionList, error)) ManagedRuleSetDefinitionListPage {
+ return ManagedRuleSetDefinitionListPage{fn: getNextPage}
+}
+
+// ManagedRuleSetDefinitionProperties properties for a managed rule set definition.
+type ManagedRuleSetDefinitionProperties struct {
+ // ProvisioningState - READ-ONLY; Provisioning state of the managed rule set.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // RuleSetType - READ-ONLY; Type of the managed rule set.
+ RuleSetType *string `json:"ruleSetType,omitempty"`
+ // RuleSetVersion - READ-ONLY; Version of the managed rule set type.
+ RuleSetVersion *string `json:"ruleSetVersion,omitempty"`
+ // RuleGroups - READ-ONLY; Rule groups of the managed rule set.
+ RuleGroups *[]ManagedRuleGroupDefinition `json:"ruleGroups,omitempty"`
+}
+
+// ManagedRuleSetList defines the list of managed rule sets for the policy.
+type ManagedRuleSetList struct {
+ // ManagedRuleSets - List of rule sets.
+ ManagedRuleSets *[]ManagedRuleSet `json:"managedRuleSets,omitempty"`
+}
+
+// MatchCondition define a match condition.
+type MatchCondition struct {
+ // MatchVariable - Request variable to compare with. Possible values include: 'RemoteAddr', 'RequestMethod', 'QueryString', 'PostArgs', 'RequestURI', 'RequestHeader', 'RequestBody', 'Cookies', 'SocketAddr'
+ MatchVariable MatchVariable `json:"matchVariable,omitempty"`
+ // Selector - Match against a specific key from the QueryString, PostArgs, RequestHeader or Cookies variables. Default is null.
+ Selector *string `json:"selector,omitempty"`
+ // Operator - Comparison type to use for matching with the variable value. Possible values include: 'Any', 'IPMatch', 'GeoMatch', 'Equal', 'Contains', 'LessThan', 'GreaterThan', 'LessThanOrEqual', 'GreaterThanOrEqual', 'BeginsWith', 'EndsWith', 'RegEx'
+ Operator Operator `json:"operator,omitempty"`
+ // NegateCondition - Describes if the result of this condition should be negated.
+ NegateCondition *bool `json:"negateCondition,omitempty"`
+ // MatchValue - List of possible match values.
+ MatchValue *[]string `json:"matchValue,omitempty"`
+ // Transforms - List of transforms.
+ Transforms *[]TransformType `json:"transforms,omitempty"`
+}
+
+// PoliciesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type PoliciesCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *PoliciesCreateOrUpdateFuture) Result(client PoliciesClient) (wafp WebApplicationFirewallPolicy, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.PoliciesCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if wafp.Response.Response, err = future.GetResult(sender); err == nil && wafp.Response.Response.StatusCode != http.StatusNoContent {
+ wafp, err = client.CreateOrUpdateResponder(wafp.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesCreateOrUpdateFuture", "Result", wafp.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
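+
+// Example (illustrative sketch, not part of the generated code): blocking until the
+// long-running create/update operation finishes. WaitForCompletionRef is provided by
+// the embedded azure.Future; client is assumed to be an already configured PoliciesClient.
+//
+//	future, err := client.CreateOrUpdate(ctx, resourceGroupName, policyName, policy)
+//	if err != nil {
+//		return err
+//	}
+//	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+//		return err
+//	}
+//	wafp, err := future.Result(client)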
+
+// PoliciesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type PoliciesDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *PoliciesDeleteFuture) Result(client PoliciesClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.PoliciesDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// PolicySettings defines top-level WebApplicationFirewallPolicy configuration settings.
+type PolicySettings struct {
+ // EnabledState - Describes if the policy is in enabled or disabled state. Defaults to Enabled if not specified. Possible values include: 'PolicyEnabledStateDisabled', 'PolicyEnabledStateEnabled'
+ EnabledState PolicyEnabledState `json:"enabledState,omitempty"`
+	// Mode - Describes if the policy is in detection mode or prevention mode at the policy level. Possible values include: 'Prevention', 'Detection'
+ Mode PolicyMode `json:"mode,omitempty"`
+ // RedirectURL - If action type is redirect, this field represents redirect URL for the client.
+ RedirectURL *string `json:"redirectUrl,omitempty"`
+ // CustomBlockResponseStatusCode - If the action type is block, customer can override the response status code.
+ CustomBlockResponseStatusCode *int32 `json:"customBlockResponseStatusCode,omitempty"`
+ // CustomBlockResponseBody - If the action type is block, customer can override the response body. The body must be specified in base64 encoding.
+ CustomBlockResponseBody *string `json:"customBlockResponseBody,omitempty"`
+}
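+
+// Example (illustrative sketch, not part of the generated code): settings for a
+// prevention-mode policy that blocks with a custom response. The body must be
+// base64-encoded, matching the validation enforced by PoliciesClient.CreateOrUpdate;
+// the base64 and to imports are assumed in the caller.
+//
+//	body := base64.StdEncoding.EncodeToString([]byte(`{"error":"blocked"}`))
+//	settings := frontdoor.PolicySettings{
+//		EnabledState:                  frontdoor.PolicyEnabledStateEnabled,
+//		Mode:                          frontdoor.Prevention,
+//		CustomBlockResponseStatusCode: to.Int32Ptr(403),
+//		CustomBlockResponseBody:       to.StringPtr(body),
+//	}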
+
+// Properties the JSON object that contains the properties required to create an endpoint.
+type Properties struct {
+ // ResourceState - Resource status of the Front Door. Possible values include: 'ResourceStateCreating', 'ResourceStateEnabling', 'ResourceStateEnabled', 'ResourceStateDisabling', 'ResourceStateDisabled', 'ResourceStateDeleting'
+ ResourceState ResourceState `json:"resourceState,omitempty"`
+ // ProvisioningState - READ-ONLY; Provisioning state of the Front Door.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // Cname - READ-ONLY; The host that each frontendEndpoint must CNAME to.
+ Cname *string `json:"cname,omitempty"`
+ // FriendlyName - A friendly name for the frontDoor
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // RoutingRules - Routing rules associated with this Front Door.
+ RoutingRules *[]RoutingRule `json:"routingRules,omitempty"`
+ // LoadBalancingSettings - Load balancing settings associated with this Front Door instance.
+ LoadBalancingSettings *[]LoadBalancingSettingsModel `json:"loadBalancingSettings,omitempty"`
+ // HealthProbeSettings - Health probe settings associated with this Front Door instance.
+ HealthProbeSettings *[]HealthProbeSettingsModel `json:"healthProbeSettings,omitempty"`
+ // BackendPools - Backend pools available to routing rules.
+ BackendPools *[]BackendPool `json:"backendPools,omitempty"`
+ // FrontendEndpoints - Frontend endpoints available to routing rules.
+ FrontendEndpoints *[]FrontendEndpoint `json:"frontendEndpoints,omitempty"`
+ // BackendPoolsSettings - Settings for all backendPools
+ BackendPoolsSettings *BackendPoolsSettings `json:"backendPoolsSettings,omitempty"`
+ // EnabledState - Operational status of the Front Door load balancer. Permitted values are 'Enabled' or 'Disabled'. Possible values include: 'EnabledStateEnabled', 'EnabledStateDisabled'
+ EnabledState EnabledState `json:"enabledState,omitempty"`
+}
+
+// PurgeParameters parameters required for content purge.
+type PurgeParameters struct {
+ // ContentPaths - The path to the content to be purged. Can describe a file path or a wild card directory.
+ ContentPaths *[]string `json:"contentPaths,omitempty"`
+}
+
+// RedirectConfiguration describes Redirect Route.
+type RedirectConfiguration struct {
+ // RedirectType - The redirect type the rule will use when redirecting traffic. Possible values include: 'Moved', 'Found', 'TemporaryRedirect', 'PermanentRedirect'
+ RedirectType RedirectType `json:"redirectType,omitempty"`
+ // RedirectProtocol - The protocol of the destination to where the traffic is redirected. Possible values include: 'RedirectProtocolHTTPOnly', 'RedirectProtocolHTTPSOnly', 'RedirectProtocolMatchRequest'
+ RedirectProtocol RedirectProtocol `json:"redirectProtocol,omitempty"`
+ // CustomHost - Host to redirect. Leave empty to use the incoming host as the destination host.
+ CustomHost *string `json:"customHost,omitempty"`
+ // CustomPath - The full path to redirect. Path cannot be empty and must start with /. Leave empty to use the incoming path as destination path.
+ CustomPath *string `json:"customPath,omitempty"`
+ // CustomFragment - Fragment to add to the redirect URL. Fragment is the part of the URL that comes after #. Do not include the #.
+ CustomFragment *string `json:"customFragment,omitempty"`
+	// CustomQueryString - The set of query strings to be placed in the redirect URL. Setting this value would replace any existing query string; leave empty to preserve the incoming query string. Query string must be in <key>=<value> format. The first ? and & will be added automatically so do not include them in the front, but do separate multiple query strings with &.
+ CustomQueryString *string `json:"customQueryString,omitempty"`
+ // OdataType - Possible values include: 'OdataTypeRouteConfiguration', 'OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorForwardingConfiguration', 'OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorRedirectConfiguration'
+ OdataType OdataType `json:"@odata.type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for RedirectConfiguration.
+func (rc RedirectConfiguration) MarshalJSON() ([]byte, error) {
+ rc.OdataType = OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorRedirectConfiguration
+ objectMap := make(map[string]interface{})
+ if rc.RedirectType != "" {
+ objectMap["redirectType"] = rc.RedirectType
+ }
+ if rc.RedirectProtocol != "" {
+ objectMap["redirectProtocol"] = rc.RedirectProtocol
+ }
+ if rc.CustomHost != nil {
+ objectMap["customHost"] = rc.CustomHost
+ }
+ if rc.CustomPath != nil {
+ objectMap["customPath"] = rc.CustomPath
+ }
+ if rc.CustomFragment != nil {
+ objectMap["customFragment"] = rc.CustomFragment
+ }
+ if rc.CustomQueryString != nil {
+ objectMap["customQueryString"] = rc.CustomQueryString
+ }
+ if rc.OdataType != "" {
+ objectMap["@odata.type"] = rc.OdataType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsForwardingConfiguration is the BasicRouteConfiguration implementation for RedirectConfiguration.
+func (rc RedirectConfiguration) AsForwardingConfiguration() (*ForwardingConfiguration, bool) {
+ return nil, false
+}
+
+// AsRedirectConfiguration is the BasicRouteConfiguration implementation for RedirectConfiguration.
+func (rc RedirectConfiguration) AsRedirectConfiguration() (*RedirectConfiguration, bool) {
+ return &rc, true
+}
+
+// AsRouteConfiguration is the BasicRouteConfiguration implementation for RedirectConfiguration.
+func (rc RedirectConfiguration) AsRouteConfiguration() (*RouteConfiguration, bool) {
+ return nil, false
+}
+
+// AsBasicRouteConfiguration is the BasicRouteConfiguration implementation for RedirectConfiguration.
+func (rc RedirectConfiguration) AsBasicRouteConfiguration() (BasicRouteConfiguration, bool) {
+ return &rc, true
+}
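+
+// Example (illustrative sketch, not part of the generated code): working with the
+// BasicRouteConfiguration interface. A RedirectConfiguration satisfies the interface
+// and can be recovered later through its type-assertion helper; the to and fmt
+// imports are assumed in the caller.
+//
+//	var route frontdoor.BasicRouteConfiguration = frontdoor.RedirectConfiguration{
+//		RedirectType:     frontdoor.PermanentRedirect,
+//		RedirectProtocol: frontdoor.RedirectProtocolHTTPSOnly,
+//		CustomHost:       to.StringPtr("www.example.com"),
+//	}
+//	if rc, ok := route.AsRedirectConfiguration(); ok {
+//		fmt.Println(*rc.CustomHost)
+//	}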
+
+// Resource common resource representation.
+type Resource struct {
+ // ID - READ-ONLY; Resource ID.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for Resource.
+func (r Resource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if r.Location != nil {
+ objectMap["location"] = r.Location
+ }
+ if r.Tags != nil {
+ objectMap["tags"] = r.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// BasicRouteConfiguration base class for all types of Route.
+type BasicRouteConfiguration interface {
+ AsForwardingConfiguration() (*ForwardingConfiguration, bool)
+ AsRedirectConfiguration() (*RedirectConfiguration, bool)
+ AsRouteConfiguration() (*RouteConfiguration, bool)
+}
+
+// RouteConfiguration base class for all types of Route.
+type RouteConfiguration struct {
+ // OdataType - Possible values include: 'OdataTypeRouteConfiguration', 'OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorForwardingConfiguration', 'OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorRedirectConfiguration'
+ OdataType OdataType `json:"@odata.type,omitempty"`
+}
+
+func unmarshalBasicRouteConfiguration(body []byte) (BasicRouteConfiguration, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["@odata.type"] {
+ case string(OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorForwardingConfiguration):
+ var fc ForwardingConfiguration
+ err := json.Unmarshal(body, &fc)
+ return fc, err
+ case string(OdataTypeMicrosoftAzureFrontDoorModelsFrontdoorRedirectConfiguration):
+ var rc RedirectConfiguration
+ err := json.Unmarshal(body, &rc)
+ return rc, err
+ default:
+ var rc RouteConfiguration
+ err := json.Unmarshal(body, &rc)
+ return rc, err
+ }
+}
+func unmarshalBasicRouteConfigurationArray(body []byte) ([]BasicRouteConfiguration, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ rcArray := make([]BasicRouteConfiguration, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ rc, err := unmarshalBasicRouteConfiguration(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ rcArray[index] = rc
+ }
+ return rcArray, nil
+}
+
+// MarshalJSON is the custom marshaler for RouteConfiguration.
+func (rc RouteConfiguration) MarshalJSON() ([]byte, error) {
+ rc.OdataType = OdataTypeRouteConfiguration
+ objectMap := make(map[string]interface{})
+ if rc.OdataType != "" {
+ objectMap["@odata.type"] = rc.OdataType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsForwardingConfiguration is the BasicRouteConfiguration implementation for RouteConfiguration.
+func (rc RouteConfiguration) AsForwardingConfiguration() (*ForwardingConfiguration, bool) {
+ return nil, false
+}
+
+// AsRedirectConfiguration is the BasicRouteConfiguration implementation for RouteConfiguration.
+func (rc RouteConfiguration) AsRedirectConfiguration() (*RedirectConfiguration, bool) {
+ return nil, false
+}
+
+// AsRouteConfiguration is the BasicRouteConfiguration implementation for RouteConfiguration.
+func (rc RouteConfiguration) AsRouteConfiguration() (*RouteConfiguration, bool) {
+ return &rc, true
+}
+
+// AsBasicRouteConfiguration is the BasicRouteConfiguration implementation for RouteConfiguration.
+func (rc RouteConfiguration) AsBasicRouteConfiguration() (BasicRouteConfiguration, bool) {
+ return &rc, true
+}
+
+// RoutingRule a routing rule represents a specification for how to treat traffic and where to send it,
+// along with health probe information.
+type RoutingRule struct {
+ autorest.Response `json:"-"`
+ // RoutingRuleProperties - Properties of the Front Door Routing Rule
+ *RoutingRuleProperties `json:"properties,omitempty"`
+ // Name - Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for RoutingRule.
+func (rr RoutingRule) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if rr.RoutingRuleProperties != nil {
+ objectMap["properties"] = rr.RoutingRuleProperties
+ }
+ if rr.Name != nil {
+ objectMap["name"] = rr.Name
+ }
+ if rr.ID != nil {
+ objectMap["id"] = rr.ID
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for RoutingRule struct.
+func (rr *RoutingRule) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var routingRuleProperties RoutingRuleProperties
+ err = json.Unmarshal(*v, &routingRuleProperties)
+ if err != nil {
+ return err
+ }
+ rr.RoutingRuleProperties = &routingRuleProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ rr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ rr.Type = &typeVar
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ rr.ID = &ID
+ }
+ }
+ }
+
+ return nil
+}
+
+// RoutingRuleListResult result of the request to list Routing Rules. It contains a list of Routing Rule
+// objects and a URL link to get the next set of results.
+type RoutingRuleListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of Routing Rules within a Front Door.
+ Value *[]RoutingRule `json:"value,omitempty"`
+ // NextLink - URL to get the next set of RoutingRule objects if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// RoutingRuleListResultIterator provides access to a complete listing of RoutingRule values.
+type RoutingRuleListResultIterator struct {
+ i int
+ page RoutingRuleListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *RoutingRuleListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RoutingRuleListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *RoutingRuleListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter RoutingRuleListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter RoutingRuleListResultIterator) Response() RoutingRuleListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter RoutingRuleListResultIterator) Value() RoutingRule {
+ if !iter.page.NotDone() {
+ return RoutingRule{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the RoutingRuleListResultIterator type.
+func NewRoutingRuleListResultIterator(page RoutingRuleListResultPage) RoutingRuleListResultIterator {
+ return RoutingRuleListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (rrlr RoutingRuleListResult) IsEmpty() bool {
+ return rrlr.Value == nil || len(*rrlr.Value) == 0
+}
+
+// routingRuleListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (rrlr RoutingRuleListResult) routingRuleListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if rrlr.NextLink == nil || len(to.String(rrlr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(rrlr.NextLink)))
+}
+
+// RoutingRuleListResultPage contains a page of RoutingRule values.
+type RoutingRuleListResultPage struct {
+ fn func(context.Context, RoutingRuleListResult) (RoutingRuleListResult, error)
+ rrlr RoutingRuleListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *RoutingRuleListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RoutingRuleListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.rrlr)
+ if err != nil {
+ return err
+ }
+ page.rrlr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *RoutingRuleListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page RoutingRuleListResultPage) NotDone() bool {
+ return !page.rrlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page RoutingRuleListResultPage) Response() RoutingRuleListResult {
+ return page.rrlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page RoutingRuleListResultPage) Values() []RoutingRule {
+ if page.rrlr.IsEmpty() {
+ return nil
+ }
+ return *page.rrlr.Value
+}
+
+// Creates a new instance of the RoutingRuleListResultPage type.
+func NewRoutingRuleListResultPage(getNextPage func(context.Context, RoutingRuleListResult) (RoutingRuleListResult, error)) RoutingRuleListResultPage {
+ return RoutingRuleListResultPage{fn: getNextPage}
+}
+
+// RoutingRuleProperties the JSON object that contains the properties required to create a routing rule.
+type RoutingRuleProperties struct {
+ // ResourceState - Resource status. Possible values include: 'ResourceStateCreating', 'ResourceStateEnabling', 'ResourceStateEnabled', 'ResourceStateDisabling', 'ResourceStateDisabled', 'ResourceStateDeleting'
+ ResourceState ResourceState `json:"resourceState,omitempty"`
+ // FrontendEndpoints - Frontend endpoints associated with this rule
+ FrontendEndpoints *[]SubResource `json:"frontendEndpoints,omitempty"`
+ // AcceptedProtocols - Protocol schemes to match for this rule
+ AcceptedProtocols *[]Protocol `json:"acceptedProtocols,omitempty"`
+ // PatternsToMatch - The route patterns of the rule.
+ PatternsToMatch *[]string `json:"patternsToMatch,omitempty"`
+ // EnabledState - Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'. Possible values include: 'RoutingRuleEnabledStateEnabled', 'RoutingRuleEnabledStateDisabled'
+ EnabledState RoutingRuleEnabledState `json:"enabledState,omitempty"`
+ // RouteConfiguration - A reference to the routing configuration.
+ RouteConfiguration BasicRouteConfiguration `json:"routeConfiguration,omitempty"`
+}
+
+// UnmarshalJSON is the custom unmarshaler for RoutingRuleProperties struct.
+func (rrp *RoutingRuleProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "resourceState":
+ if v != nil {
+ var resourceState ResourceState
+ err = json.Unmarshal(*v, &resourceState)
+ if err != nil {
+ return err
+ }
+ rrp.ResourceState = resourceState
+ }
+ case "frontendEndpoints":
+ if v != nil {
+ var frontendEndpoints []SubResource
+ err = json.Unmarshal(*v, &frontendEndpoints)
+ if err != nil {
+ return err
+ }
+ rrp.FrontendEndpoints = &frontendEndpoints
+ }
+ case "acceptedProtocols":
+ if v != nil {
+ var acceptedProtocols []Protocol
+ err = json.Unmarshal(*v, &acceptedProtocols)
+ if err != nil {
+ return err
+ }
+ rrp.AcceptedProtocols = &acceptedProtocols
+ }
+ case "patternsToMatch":
+ if v != nil {
+ var patternsToMatch []string
+ err = json.Unmarshal(*v, &patternsToMatch)
+ if err != nil {
+ return err
+ }
+ rrp.PatternsToMatch = &patternsToMatch
+ }
+ case "enabledState":
+ if v != nil {
+ var enabledState RoutingRuleEnabledState
+ err = json.Unmarshal(*v, &enabledState)
+ if err != nil {
+ return err
+ }
+ rrp.EnabledState = enabledState
+ }
+ case "routeConfiguration":
+ if v != nil {
+ routeConfiguration, err := unmarshalBasicRouteConfiguration(*v)
+ if err != nil {
+ return err
+ }
+ rrp.RouteConfiguration = routeConfiguration
+ }
+ }
+ }
+
+ return nil
+}
+
+// RoutingRulesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type RoutingRulesCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *RoutingRulesCreateOrUpdateFuture) Result(client RoutingRulesClient) (rr RoutingRule, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.RoutingRulesCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if rr.Response.Response, err = future.GetResult(sender); err == nil && rr.Response.Response.StatusCode != http.StatusNoContent {
+ rr, err = client.CreateOrUpdateResponder(rr.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesCreateOrUpdateFuture", "Result", rr.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// RoutingRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type RoutingRulesDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *RoutingRulesDeleteFuture) Result(client RoutingRulesClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("frontdoor.RoutingRulesDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// RoutingRuleUpdateParameters routing rules to apply to an endpoint
+type RoutingRuleUpdateParameters struct {
+ // FrontendEndpoints - Frontend endpoints associated with this rule
+ FrontendEndpoints *[]SubResource `json:"frontendEndpoints,omitempty"`
+ // AcceptedProtocols - Protocol schemes to match for this rule
+ AcceptedProtocols *[]Protocol `json:"acceptedProtocols,omitempty"`
+ // PatternsToMatch - The route patterns of the rule.
+ PatternsToMatch *[]string `json:"patternsToMatch,omitempty"`
+ // EnabledState - Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'. Possible values include: 'RoutingRuleEnabledStateEnabled', 'RoutingRuleEnabledStateDisabled'
+ EnabledState RoutingRuleEnabledState `json:"enabledState,omitempty"`
+ // RouteConfiguration - A reference to the routing configuration.
+ RouteConfiguration BasicRouteConfiguration `json:"routeConfiguration,omitempty"`
+}
+
+// UnmarshalJSON is the custom unmarshaler for RoutingRuleUpdateParameters struct.
+func (rrup *RoutingRuleUpdateParameters) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "frontendEndpoints":
+ if v != nil {
+ var frontendEndpoints []SubResource
+ err = json.Unmarshal(*v, &frontendEndpoints)
+ if err != nil {
+ return err
+ }
+ rrup.FrontendEndpoints = &frontendEndpoints
+ }
+ case "acceptedProtocols":
+ if v != nil {
+ var acceptedProtocols []Protocol
+ err = json.Unmarshal(*v, &acceptedProtocols)
+ if err != nil {
+ return err
+ }
+ rrup.AcceptedProtocols = &acceptedProtocols
+ }
+ case "patternsToMatch":
+ if v != nil {
+ var patternsToMatch []string
+ err = json.Unmarshal(*v, &patternsToMatch)
+ if err != nil {
+ return err
+ }
+ rrup.PatternsToMatch = &patternsToMatch
+ }
+ case "enabledState":
+ if v != nil {
+ var enabledState RoutingRuleEnabledState
+ err = json.Unmarshal(*v, &enabledState)
+ if err != nil {
+ return err
+ }
+ rrup.EnabledState = enabledState
+ }
+ case "routeConfiguration":
+ if v != nil {
+ routeConfiguration, err := unmarshalBasicRouteConfiguration(*v)
+ if err != nil {
+ return err
+ }
+ rrup.RouteConfiguration = routeConfiguration
+ }
+ }
+ }
+
+ return nil
+}
+
+// SubResource reference to another subresource.
+type SubResource struct {
+ // ID - Resource ID.
+ ID *string `json:"id,omitempty"`
+}
+
+// TagsObject tags object for patch operations.
+type TagsObject struct {
+ // Tags - Resource tags.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for TagsObject.
+func (toVar TagsObject) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if toVar.Tags != nil {
+ objectMap["tags"] = toVar.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UpdateParameters the properties needed to update a Front Door
+type UpdateParameters struct {
+ // FriendlyName - A friendly name for the frontDoor
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // RoutingRules - Routing rules associated with this Front Door.
+ RoutingRules *[]RoutingRule `json:"routingRules,omitempty"`
+ // LoadBalancingSettings - Load balancing settings associated with this Front Door instance.
+ LoadBalancingSettings *[]LoadBalancingSettingsModel `json:"loadBalancingSettings,omitempty"`
+ // HealthProbeSettings - Health probe settings associated with this Front Door instance.
+ HealthProbeSettings *[]HealthProbeSettingsModel `json:"healthProbeSettings,omitempty"`
+ // BackendPools - Backend pools available to routing rules.
+ BackendPools *[]BackendPool `json:"backendPools,omitempty"`
+ // FrontendEndpoints - Frontend endpoints available to routing rules.
+ FrontendEndpoints *[]FrontendEndpoint `json:"frontendEndpoints,omitempty"`
+ // BackendPoolsSettings - Settings for all backendPools
+ BackendPoolsSettings *BackendPoolsSettings `json:"backendPoolsSettings,omitempty"`
+ // EnabledState - Operational status of the Front Door load balancer. Permitted values are 'Enabled' or 'Disabled'. Possible values include: 'EnabledStateEnabled', 'EnabledStateDisabled'
+ EnabledState EnabledState `json:"enabledState,omitempty"`
+}
+
+// ValidateCustomDomainInput input of the custom domain to be validated for DNS mapping.
+type ValidateCustomDomainInput struct {
+ // HostName - The host name of the custom domain. Must be a domain name.
+ HostName *string `json:"hostName,omitempty"`
+}
+
+// ValidateCustomDomainOutput output of custom domain validation.
+type ValidateCustomDomainOutput struct {
+ autorest.Response `json:"-"`
+ // CustomDomainValidated - READ-ONLY; Indicates whether the custom domain is valid or not.
+ CustomDomainValidated *bool `json:"customDomainValidated,omitempty"`
+ // Reason - READ-ONLY; The reason why the custom domain is not valid.
+ Reason *string `json:"reason,omitempty"`
+ // Message - READ-ONLY; Error message describing why the custom domain is not valid.
+ Message *string `json:"message,omitempty"`
+}
+
+// WebApplicationFirewallPolicy defines web application firewall policy.
+type WebApplicationFirewallPolicy struct {
+ autorest.Response `json:"-"`
+ // WebApplicationFirewallPolicyProperties - Properties of the web application firewall policy.
+ *WebApplicationFirewallPolicyProperties `json:"properties,omitempty"`
+ // Etag - Gets a unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Resource ID.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for WebApplicationFirewallPolicy.
+func (wafp WebApplicationFirewallPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if wafp.WebApplicationFirewallPolicyProperties != nil {
+ objectMap["properties"] = wafp.WebApplicationFirewallPolicyProperties
+ }
+ if wafp.Etag != nil {
+ objectMap["etag"] = wafp.Etag
+ }
+ if wafp.Location != nil {
+ objectMap["location"] = wafp.Location
+ }
+ if wafp.Tags != nil {
+ objectMap["tags"] = wafp.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for WebApplicationFirewallPolicy struct.
+func (wafp *WebApplicationFirewallPolicy) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var webApplicationFirewallPolicyProperties WebApplicationFirewallPolicyProperties
+ err = json.Unmarshal(*v, &webApplicationFirewallPolicyProperties)
+ if err != nil {
+ return err
+ }
+ wafp.WebApplicationFirewallPolicyProperties = &webApplicationFirewallPolicyProperties
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ wafp.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ wafp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ wafp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ wafp.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ wafp.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ wafp.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// WebApplicationFirewallPolicyList defines a list of WebApplicationFirewallPolicies. It contains a list of
+// WebApplicationFirewallPolicy objects and a URL link to get the next set of results.
+type WebApplicationFirewallPolicyList struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of WebApplicationFirewallPolicies within a resource group.
+ Value *[]WebApplicationFirewallPolicy `json:"value,omitempty"`
+ // NextLink - URL to get the next set of WebApplicationFirewallPolicy objects if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// WebApplicationFirewallPolicyListIterator provides access to a complete listing of
+// WebApplicationFirewallPolicy values.
+type WebApplicationFirewallPolicyListIterator struct {
+ i int
+ page WebApplicationFirewallPolicyListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *WebApplicationFirewallPolicyListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WebApplicationFirewallPolicyListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *WebApplicationFirewallPolicyListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter WebApplicationFirewallPolicyListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter WebApplicationFirewallPolicyListIterator) Response() WebApplicationFirewallPolicyList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter WebApplicationFirewallPolicyListIterator) Value() WebApplicationFirewallPolicy {
+ if !iter.page.NotDone() {
+ return WebApplicationFirewallPolicy{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the WebApplicationFirewallPolicyListIterator type.
+func NewWebApplicationFirewallPolicyListIterator(page WebApplicationFirewallPolicyListPage) WebApplicationFirewallPolicyListIterator {
+ return WebApplicationFirewallPolicyListIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (wafpl WebApplicationFirewallPolicyList) IsEmpty() bool {
+ return wafpl.Value == nil || len(*wafpl.Value) == 0
+}
+
+// webApplicationFirewallPolicyListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (wafpl WebApplicationFirewallPolicyList) webApplicationFirewallPolicyListPreparer(ctx context.Context) (*http.Request, error) {
+ if wafpl.NextLink == nil || len(to.String(wafpl.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(wafpl.NextLink)))
+}
+
+// WebApplicationFirewallPolicyListPage contains a page of WebApplicationFirewallPolicy values.
+type WebApplicationFirewallPolicyListPage struct {
+ fn func(context.Context, WebApplicationFirewallPolicyList) (WebApplicationFirewallPolicyList, error)
+ wafpl WebApplicationFirewallPolicyList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *WebApplicationFirewallPolicyListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WebApplicationFirewallPolicyListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.wafpl)
+ if err != nil {
+ return err
+ }
+ page.wafpl = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *WebApplicationFirewallPolicyListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page WebApplicationFirewallPolicyListPage) NotDone() bool {
+ return !page.wafpl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page WebApplicationFirewallPolicyListPage) Response() WebApplicationFirewallPolicyList {
+ return page.wafpl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page WebApplicationFirewallPolicyListPage) Values() []WebApplicationFirewallPolicy {
+ if page.wafpl.IsEmpty() {
+ return nil
+ }
+ return *page.wafpl.Value
+}
+
+// Creates a new instance of the WebApplicationFirewallPolicyListPage type.
+func NewWebApplicationFirewallPolicyListPage(getNextPage func(context.Context, WebApplicationFirewallPolicyList) (WebApplicationFirewallPolicyList, error)) WebApplicationFirewallPolicyListPage {
+ return WebApplicationFirewallPolicyListPage{fn: getNextPage}
+}
+
+// WebApplicationFirewallPolicyProperties defines web application firewall policy properties.
+type WebApplicationFirewallPolicyProperties struct {
+ // PolicySettings - Describes settings for the policy.
+ PolicySettings *PolicySettings `json:"policySettings,omitempty"`
+ // CustomRules - Describes custom rules inside the policy.
+ CustomRules *CustomRuleList `json:"customRules,omitempty"`
+ // ManagedRules - Describes managed rules inside the policy.
+ ManagedRules *ManagedRuleSetList `json:"managedRules,omitempty"`
+ // FrontendEndpointLinks - READ-ONLY; Describes Frontend Endpoints associated with this Web Application Firewall policy.
+ FrontendEndpointLinks *[]FrontendEndpointLink `json:"frontendEndpointLinks,omitempty"`
+ // ProvisioningState - READ-ONLY; Provisioning state of the policy.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // ResourceState - READ-ONLY; Possible values include: 'PolicyResourceStateCreating', 'PolicyResourceStateEnabling', 'PolicyResourceStateEnabled', 'PolicyResourceStateDisabling', 'PolicyResourceStateDisabled', 'PolicyResourceStateDeleting'
+ ResourceState PolicyResourceState `json:"resourceState,omitempty"`
+}
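+
+// Example (illustrative sketch, not part of the generated code): assembling a
+// WebApplicationFirewallPolicy from the types above. The rule set type and version
+// strings are assumptions; check the service documentation for currently valid values.
+//
+//	policy := frontdoor.WebApplicationFirewallPolicy{
+//		Location: to.StringPtr("global"),
+//		WebApplicationFirewallPolicyProperties: &frontdoor.WebApplicationFirewallPolicyProperties{
+//			PolicySettings: &settings, // see the PolicySettings sketch above
+//			ManagedRules: &frontdoor.ManagedRuleSetList{
+//				ManagedRuleSets: &[]frontdoor.ManagedRuleSet{{
+//					RuleSetType:    to.StringPtr("DefaultRuleSet"),
+//					RuleSetVersion: to.StringPtr("1.0"),
+//				}},
+//			},
+//		},
+//	}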
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/policies.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/policies.go
new file mode 100644
index 000000000000..89c4fdb4da6d
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/policies.go
@@ -0,0 +1,433 @@
+package frontdoor
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// PoliciesClient is the frontDoor Client
+type PoliciesClient struct {
+ BaseClient
+}
+
+// NewPoliciesClient creates an instance of the PoliciesClient client.
+func NewPoliciesClient(subscriptionID string) PoliciesClient {
+ return NewPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPoliciesClientWithBaseURI creates an instance of the PoliciesClient client.
+func NewPoliciesClientWithBaseURI(baseURI string, subscriptionID string) PoliciesClient {
+ return PoliciesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a policy with the specified rule set name within a resource group.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// policyName - the name of the Web Application Firewall Policy.
+// parameters - policy to be created.
+func (client PoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, policyName string, parameters WebApplicationFirewallPolicy) (result PoliciesCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoliciesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: policyName,
+ Constraints: []validation.Constraint{{Target: "policyName", Name: validation.MaxLength, Rule: 128, Chain: nil}}},
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.WebApplicationFirewallPolicyProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.WebApplicationFirewallPolicyProperties.PolicySettings", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.WebApplicationFirewallPolicyProperties.PolicySettings.CustomBlockResponseBody", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.WebApplicationFirewallPolicyProperties.PolicySettings.CustomBlockResponseBody", Name: validation.Pattern, Rule: `^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{4})$`, Chain: nil}}},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("frontdoor.PoliciesClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, policyName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client PoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, policyName string, parameters WebApplicationFirewallPolicy) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyName": autorest.Encode("path", policyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallPolicies/{policyName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoliciesClient) CreateOrUpdateSender(req *http.Request) (future PoliciesCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client PoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result WebApplicationFirewallPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes the specified Web Application Firewall policy.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// policyName - the name of the Web Application Firewall Policy.
+func (client PoliciesClient) Delete(ctx context.Context, resourceGroupName string, policyName string) (result PoliciesDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoliciesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: policyName,
+ Constraints: []validation.Constraint{{Target: "policyName", Name: validation.MaxLength, Rule: 128, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.PoliciesClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, policyName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, policyName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyName": autorest.Encode("path", policyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallPolicies/{policyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoliciesClient) DeleteSender(req *http.Request) (future PoliciesDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get retrieves a protection policy with the specified name within a resource group.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// policyName - the name of the Web Application Firewall Policy.
+func (client PoliciesClient) Get(ctx context.Context, resourceGroupName string, policyName string) (result WebApplicationFirewallPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoliciesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: policyName,
+ Constraints: []validation.Constraint{{Target: "policyName", Name: validation.MaxLength, Rule: 128, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.PoliciesClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, policyName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client PoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, policyName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyName": autorest.Encode("path", policyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallPolicies/{policyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoliciesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PoliciesClient) GetResponder(resp *http.Response) (result WebApplicationFirewallPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all of the protection policies within a resource group.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+func (client PoliciesClient) List(ctx context.Context, resourceGroupName string) (result WebApplicationFirewallPolicyListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoliciesClient.List")
+ defer func() {
+ sc := -1
+ if result.wafpl.Response.Response != nil {
+ sc = result.wafpl.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.PoliciesClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.wafpl.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.wafpl, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client PoliciesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/FrontDoorWebApplicationFirewallPolicies", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client PoliciesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client PoliciesClient) ListResponder(resp *http.Response) (result WebApplicationFirewallPolicyList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client PoliciesClient) listNextResults(ctx context.Context, lastResults WebApplicationFirewallPolicyList) (result WebApplicationFirewallPolicyList, err error) {
+ req, err := lastResults.webApplicationFirewallPolicyListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.PoliciesClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client PoliciesClient) ListComplete(ctx context.Context, resourceGroupName string) (result WebApplicationFirewallPolicyListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PoliciesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName)
+ return
+}
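As a usage sketch (not part of this change set): creating or updating a policy through the new client and blocking on the returned future. It assumes an autorest.Authorizer is already configured, that WebApplicationFirewallPolicy embeds *WebApplicationFirewallPolicyProperties and carries a Location field as other generated models do, and that the future's Result helper is generated in models.go.

package samples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/frontdoor/mgmt/2019-05-01/frontdoor"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/to"
)

// createOrUpdateWAFPolicy drives the long-running PUT end to end.
func createOrUpdateWAFPolicy(ctx context.Context, authorizer autorest.Authorizer, subscriptionID, resourceGroup, policyName string) (frontdoor.WebApplicationFirewallPolicy, error) {
	client := frontdoor.NewPoliciesClient(subscriptionID)
	client.Authorizer = authorizer

	parameters := frontdoor.WebApplicationFirewallPolicy{
		Location: to.StringPtr("Global"),
		WebApplicationFirewallPolicyProperties: &frontdoor.WebApplicationFirewallPolicyProperties{
			PolicySettings: &frontdoor.PolicySettings{},
		},
	}

	future, err := client.CreateOrUpdate(ctx, resourceGroup, policyName, parameters)
	if err != nil {
		return frontdoor.WebApplicationFirewallPolicy{}, err
	}
	// Poll until the service reports the long-running operation as finished.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return frontdoor.WebApplicationFirewallPolicy{}, err
	}
	return future.Result(client)
}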
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/routingrules.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/routingrules.go
new file mode 100644
index 000000000000..bdba08555973
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/routingrules.go
@@ -0,0 +1,457 @@
+package frontdoor
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// RoutingRulesClient is the frontDoor Client
+type RoutingRulesClient struct {
+ BaseClient
+}
+
+// NewRoutingRulesClient creates an instance of the RoutingRulesClient client.
+func NewRoutingRulesClient(subscriptionID string) RoutingRulesClient {
+ return NewRoutingRulesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewRoutingRulesClientWithBaseURI creates an instance of the RoutingRulesClient client.
+func NewRoutingRulesClientWithBaseURI(baseURI string, subscriptionID string) RoutingRulesClient {
+ return RoutingRulesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a new Routing Rule with the specified Rule name within the specified Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// routingRuleName - name of the Routing Rule which is unique within the Front Door.
+// routingRuleParameters - routing Rule properties needed to create a new Front Door.
+func (client RoutingRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, frontDoorName string, routingRuleName string, routingRuleParameters RoutingRule) (result RoutingRulesCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RoutingRulesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: routingRuleName,
+ Constraints: []validation.Constraint{{Target: "routingRuleName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "routingRuleName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "routingRuleName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.RoutingRulesClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, frontDoorName, routingRuleName, routingRuleParameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client RoutingRulesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, frontDoorName string, routingRuleName string, routingRuleParameters RoutingRule) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "routingRuleName": autorest.Encode("path", routingRuleName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ routingRuleParameters.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/routingRules/{routingRuleName}", pathParameters),
+ autorest.WithJSON(routingRuleParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client RoutingRulesClient) CreateOrUpdateSender(req *http.Request) (future RoutingRulesCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client RoutingRulesClient) CreateOrUpdateResponder(resp *http.Response) (result RoutingRule, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes an existing Routing Rule with the specified parameters.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// routingRuleName - name of the Routing Rule which is unique within the Front Door.
+func (client RoutingRulesClient) Delete(ctx context.Context, resourceGroupName string, frontDoorName string, routingRuleName string) (result RoutingRulesDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RoutingRulesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: routingRuleName,
+ Constraints: []validation.Constraint{{Target: "routingRuleName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "routingRuleName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "routingRuleName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.RoutingRulesClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, frontDoorName, routingRuleName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client RoutingRulesClient) DeletePreparer(ctx context.Context, resourceGroupName string, frontDoorName string, routingRuleName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "routingRuleName": autorest.Encode("path", routingRuleName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/routingRules/{routingRuleName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client RoutingRulesClient) DeleteSender(req *http.Request) (future RoutingRulesDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client RoutingRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets a Routing Rule with the specified Rule name within the specified Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+// routingRuleName - name of the Routing Rule which is unique within the Front Door.
+func (client RoutingRulesClient) Get(ctx context.Context, resourceGroupName string, frontDoorName string, routingRuleName string) (result RoutingRule, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RoutingRulesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}},
+ {TargetValue: routingRuleName,
+ Constraints: []validation.Constraint{{Target: "routingRuleName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "routingRuleName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "routingRuleName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+(-*[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.RoutingRulesClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, frontDoorName, routingRuleName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client RoutingRulesClient) GetPreparer(ctx context.Context, resourceGroupName string, frontDoorName string, routingRuleName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "routingRuleName": autorest.Encode("path", routingRuleName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/routingRules/{routingRuleName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client RoutingRulesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client RoutingRulesClient) GetResponder(resp *http.Response) (result RoutingRule, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByFrontDoor lists all of the Routing Rules within a Front Door.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// frontDoorName - name of the Front Door which is globally unique.
+func (client RoutingRulesClient) ListByFrontDoor(ctx context.Context, resourceGroupName string, frontDoorName string) (result RoutingRuleListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RoutingRulesClient.ListByFrontDoor")
+ defer func() {
+ sc := -1
+ if result.rrlr.Response.Response != nil {
+ sc = result.rrlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 80, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9_\-\(\)\.]*[^\.]$`, Chain: nil}}},
+ {TargetValue: frontDoorName,
+ Constraints: []validation.Constraint{{Target: "frontDoorName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "frontDoorName", Name: validation.MinLength, Rule: 5, Chain: nil},
+ {Target: "frontDoorName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([-a-zA-Z0-9]?[a-zA-Z0-9])*$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("frontdoor.RoutingRulesClient", "ListByFrontDoor", err.Error())
+ }
+
+ result.fn = client.listByFrontDoorNextResults
+ req, err := client.ListByFrontDoorPreparer(ctx, resourceGroupName, frontDoorName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "ListByFrontDoor", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByFrontDoorSender(req)
+ if err != nil {
+ result.rrlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "ListByFrontDoor", resp, "Failure sending request")
+ return
+ }
+
+ result.rrlr, err = client.ListByFrontDoorResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "ListByFrontDoor", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByFrontDoorPreparer prepares the ListByFrontDoor request.
+func (client RoutingRulesClient) ListByFrontDoorPreparer(ctx context.Context, resourceGroupName string, frontDoorName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "frontDoorName": autorest.Encode("path", frontDoorName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/frontDoors/{frontDoorName}/routingRules", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByFrontDoorSender sends the ListByFrontDoor request. The method will close the
+// http.Response Body if it receives an error.
+func (client RoutingRulesClient) ListByFrontDoorSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByFrontDoorResponder handles the response to the ListByFrontDoor request. The method always
+// closes the http.Response Body.
+func (client RoutingRulesClient) ListByFrontDoorResponder(resp *http.Response) (result RoutingRuleListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByFrontDoorNextResults retrieves the next set of results, if any.
+func (client RoutingRulesClient) listByFrontDoorNextResults(ctx context.Context, lastResults RoutingRuleListResult) (result RoutingRuleListResult, err error) {
+ req, err := lastResults.routingRuleListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "listByFrontDoorNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByFrontDoorSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "listByFrontDoorNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByFrontDoorResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "frontdoor.RoutingRulesClient", "listByFrontDoorNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByFrontDoorComplete enumerates all values, automatically crossing page boundaries as required.
+func (client RoutingRulesClient) ListByFrontDoorComplete(ctx context.Context, resourceGroupName string, frontDoorName string) (result RoutingRuleListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RoutingRulesClient.ListByFrontDoor")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByFrontDoor(ctx, resourceGroupName, frontDoorName)
+ return
+}
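A companion sketch for the routing-rule operations above, using the Complete iterator so page boundaries are crossed automatically. The iterator's NotDone/Value/NextWithContext methods are assumed to follow the shape generated for other iterators in this SDK.

package samples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/frontdoor/mgmt/2019-05-01/frontdoor"
)

// collectRoutingRules gathers every routing rule of a Front Door.
func collectRoutingRules(ctx context.Context, client frontdoor.RoutingRulesClient, resourceGroup, frontDoorName string) ([]frontdoor.RoutingRule, error) {
	var rules []frontdoor.RoutingRule
	it, err := client.ListByFrontDoorComplete(ctx, resourceGroup, frontDoorName)
	if err != nil {
		return nil, err
	}
	for it.NotDone() {
		rules = append(rules, it.Value())
		if err := it.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return rules, nil
}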
diff --git a/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/version.go b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/version.go
new file mode 100644
index 000000000000..fb5fbd023513
--- /dev/null
+++ b/services/preview/frontdoor/mgmt/2019-05-01/frontdoor/version.go
@@ -0,0 +1,30 @@
+package frontdoor
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " frontdoor/2019-05-01"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
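A trivial sketch of the package-level helpers added here; the exact version string depends on the SDK release in use.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/frontdoor/mgmt/2019-05-01/frontdoor"
)

func main() {
	// Prints something like "Azure-SDK-For-Go/v33.0.0 frontdoor/2019-05-01".
	fmt.Println(frontdoor.UserAgent())
	// Prints just the SDK version number.
	fmt.Println(frontdoor.Version())
}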
diff --git a/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/hanainstances.go b/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/hanainstances.go
index 951ed68d2440..485c2b7576bf 100644
--- a/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/hanainstances.go
+++ b/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/hanainstances.go
@@ -196,85 +196,6 @@ func (client HanaInstancesClient) DeleteResponder(resp *http.Response) (result a
return
}
-// EnableMonitoring the operation to add a monitor to an SAP HANA instance.
-// Parameters:
-// resourceGroupName - name of the resource group.
-// hanaInstanceName - name of the SAP HANA on Azure instance.
-// monitoringParameter - request body that only contains monitoring attributes
-func (client HanaInstancesClient) EnableMonitoring(ctx context.Context, resourceGroupName string, hanaInstanceName string, monitoringParameter MonitoringDetails) (result HanaInstancesEnableMonitoringFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/HanaInstancesClient.EnableMonitoring")
- defer func() {
- sc := -1
- if result.Response() != nil {
- sc = result.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.EnableMonitoringPreparer(ctx, resourceGroupName, hanaInstanceName, monitoringParameter)
- if err != nil {
- err = autorest.NewErrorWithError(err, "hanaonazure.HanaInstancesClient", "EnableMonitoring", nil, "Failure preparing request")
- return
- }
-
- result, err = client.EnableMonitoringSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "hanaonazure.HanaInstancesClient", "EnableMonitoring", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// EnableMonitoringPreparer prepares the EnableMonitoring request.
-func (client HanaInstancesClient) EnableMonitoringPreparer(ctx context.Context, resourceGroupName string, hanaInstanceName string, monitoringParameter MonitoringDetails) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "hanaInstanceName": autorest.Encode("path", hanaInstanceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2017-11-03-preview"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPost(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/hanaInstances/{hanaInstanceName}/monitoring", pathParameters),
- autorest.WithJSON(monitoringParameter),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// EnableMonitoringSender sends the EnableMonitoring request. The method will close the
-// http.Response Body if it receives an error.
-func (client HanaInstancesClient) EnableMonitoringSender(req *http.Request) (future HanaInstancesEnableMonitoringFuture, err error) {
- sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
- var resp *http.Response
- resp, err = autorest.SendWithSender(client, req, sd...)
- if err != nil {
- return
- }
- future.Future, err = azure.NewFutureFromResponse(resp)
- return
-}
-
-// EnableMonitoringResponder handles the response to the EnableMonitoring request. The method always
-// closes the http.Response Body.
-func (client HanaInstancesClient) EnableMonitoringResponder(resp *http.Response) (result autorest.Response, err error) {
- err = autorest.Respond(
- resp,
- client.ByInspecting(),
- azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
- autorest.ByClosing())
- result.Response = resp
- return
-}
-
// Get gets properties of a SAP HANA instance for the specified subscription, resource group, and instance name.
// Parameters:
// resourceGroupName - name of the resource group.
diff --git a/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/hanaonazureapi/interfaces.go b/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/hanaonazureapi/interfaces.go
index 1a2908a67e53..8201279ac087 100644
--- a/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/hanaonazureapi/interfaces.go
+++ b/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/hanaonazureapi/interfaces.go
@@ -33,7 +33,6 @@ var _ OperationsClientAPI = (*hanaonazure.OperationsClient)(nil)
type HanaInstancesClientAPI interface {
Create(ctx context.Context, resourceGroupName string, hanaInstanceName string, hanaInstanceParameter hanaonazure.HanaInstance) (result hanaonazure.HanaInstancesCreateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, hanaInstanceName string) (result hanaonazure.HanaInstancesDeleteFuture, err error)
- EnableMonitoring(ctx context.Context, resourceGroupName string, hanaInstanceName string, monitoringParameter hanaonazure.MonitoringDetails) (result hanaonazure.HanaInstancesEnableMonitoringFuture, err error)
Get(ctx context.Context, resourceGroupName string, hanaInstanceName string) (result hanaonazure.HanaInstance, err error)
List(ctx context.Context) (result hanaonazure.HanaInstancesListResultPage, err error)
ListByResourceGroup(ctx context.Context, resourceGroupName string) (result hanaonazure.HanaInstancesListResultPage, err error)
diff --git a/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/models.go b/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/models.go
index f6323fbea802..de3e9d7519b2 100644
--- a/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/models.go
+++ b/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure/models.go
@@ -356,29 +356,6 @@ func (future *HanaInstancesDeleteFuture) Result(client HanaInstancesClient) (ar
return
}
-// HanaInstancesEnableMonitoringFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type HanaInstancesEnableMonitoringFuture struct {
- azure.Future
-}
-
-// Result returns the result of the asynchronous operation.
-// If the operation has not completed it will return an error.
-func (future *HanaInstancesEnableMonitoringFuture) Result(client HanaInstancesClient) (ar autorest.Response, err error) {
- var done bool
- done, err = future.DoneWithContext(context.Background(), client)
- if err != nil {
- err = autorest.NewErrorWithError(err, "hanaonazure.HanaInstancesEnableMonitoringFuture", "Result", future.Response(), "Polling failure")
- return
- }
- if !done {
- err = azure.NewAsyncOpIncompleteError("hanaonazure.HanaInstancesEnableMonitoringFuture")
- return
- }
- ar.Response = future.Response()
- return
-}
-
// HanaInstancesListResult the response from the List HANA Instances operation.
type HanaInstancesListResult struct {
autorest.Response `json:"-"`
@@ -944,8 +921,14 @@ type SapMonitorProperties struct {
HanaDbPasswordKeyVaultURL *string `json:"hanaDbPasswordKeyVaultUrl,omitempty"`
// HanaDbCredentialsMsiID - MSI ID passed by customer which has access to customer's KeyVault and to be assigned to the Collector VM.
HanaDbCredentialsMsiID *string `json:"hanaDbCredentialsMsiId,omitempty"`
+ // KeyVaultID - Key Vault ID containing customer's HANA credentials.
+ KeyVaultID *string `json:"keyVaultId,omitempty"`
// ProvisioningState - READ-ONLY; State of provisioning of the HanaInstance. Possible values include: 'Accepted', 'Creating', 'Updating', 'Failed', 'Succeeded', 'Deleting', 'Migrating'
ProvisioningState HanaProvisioningStatesEnum `json:"provisioningState,omitempty"`
+ // ManagedResourceGroupName - READ-ONLY; The name of the resource group the SAP Monitor resources get deployed into.
+ ManagedResourceGroupName *string `json:"managedResourceGroupName,omitempty"`
+ // LogAnalyticsWorkspaceArmID - READ-ONLY; The ARM ID of the Log Analytics Workspace that is used for monitoring
+ LogAnalyticsWorkspaceArmID *string `json:"logAnalyticsWorkspaceArmId,omitempty"`
}
// SapMonitorsCreateFuture an abstraction for monitoring and retrieving the results of a long-running
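For the new SapMonitorProperties fields above, a small illustrative sketch of populating the writable part of the struct. The keyVaultID, passwordSecretURL and msiID parameters are hypothetical caller-supplied values, and the READ-ONLY fields are left for the service to fill in.

package samples

import (
	"github.com/Azure/azure-sdk-for-go/services/preview/hanaonazure/mgmt/2017-11-03-preview/hanaonazure"
	"github.com/Azure/go-autorest/autorest/to"
)

// newSapMonitorProperties builds the writable portion of SapMonitorProperties.
func newSapMonitorProperties(keyVaultID, passwordSecretURL, msiID string) hanaonazure.SapMonitorProperties {
	return hanaonazure.SapMonitorProperties{
		HanaDbPasswordKeyVaultURL: to.StringPtr(passwordSecretURL),
		HanaDbCredentialsMsiID:    to.StringPtr(msiID),
		// KeyVaultID is new in this change: the resource ID of the Key Vault
		// holding the HANA credentials.
		KeyVaultID: to.StringPtr(keyVaultID),
		// ProvisioningState, ManagedResourceGroupName and
		// LogAnalyticsWorkspaceArmID are READ-ONLY and populated by the service.
	}
}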
diff --git a/services/preview/healthcareapis/mgmt/2018-08-20-preview/healthcareapis/models.go b/services/preview/healthcareapis/mgmt/2018-08-20-preview/healthcareapis/models.go
index e5eb52d1bcbf..26f188dba6f5 100644
--- a/services/preview/healthcareapis/mgmt/2018-08-20-preview/healthcareapis/models.go
+++ b/services/preview/healthcareapis/mgmt/2018-08-20-preview/healthcareapis/models.go
@@ -30,6 +30,23 @@ import (
// The package's fully qualified name.
const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/healthcareapis/mgmt/2018-08-20-preview/healthcareapis"
+// Kind enumerates the values for kind.
+type Kind string
+
+const (
+ // Fhir ...
+ Fhir Kind = "fhir"
+ // FhirR4 ...
+ FhirR4 Kind = "fhir-R4"
+ // FhirStu3 ...
+ FhirStu3 Kind = "fhir-Stu3"
+)
+
+// PossibleKindValues returns an array of possible values for the Kind const type.
+func PossibleKindValues() []Kind {
+ return []Kind{Fhir, FhirR4, FhirStu3}
+}
+
// OperationResultStatus enumerates the values for operation result status.
type OperationResultStatus string
@@ -310,8 +327,8 @@ type Resource struct {
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; The resource type.
Type *string `json:"type,omitempty"`
- // Kind - The kind of the service. Valid values are: fhir.
- Kind *string `json:"kind,omitempty"`
+ // Kind - The kind of the service. Valid values are: fhir, fhir-Stu3 and fhir-R4. Possible values include: 'Fhir', 'FhirStu3', 'FhirR4'
+ Kind Kind `json:"kind,omitempty"`
// Location - The resource location.
Location *string `json:"location,omitempty"`
// Tags - The resource tags.
@@ -323,7 +340,7 @@ type Resource struct {
// MarshalJSON is the custom marshaler for Resource.
func (r Resource) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
- if r.Kind != nil {
+ if r.Kind != "" {
objectMap["kind"] = r.Kind
}
if r.Location != nil {
@@ -437,8 +454,8 @@ type ServicesDescription struct {
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; The resource type.
Type *string `json:"type,omitempty"`
- // Kind - The kind of the service. Valid values are: fhir.
- Kind *string `json:"kind,omitempty"`
+ // Kind - The kind of the service. Valid values are: fhir, fhir-Stu3 and fhir-R4. Possible values include: 'Fhir', 'FhirStu3', 'FhirR4'
+ Kind Kind `json:"kind,omitempty"`
// Location - The resource location.
Location *string `json:"location,omitempty"`
// Tags - The resource tags.
@@ -453,7 +470,7 @@ func (sd ServicesDescription) MarshalJSON() ([]byte, error) {
if sd.Properties != nil {
objectMap["properties"] = sd.Properties
}
- if sd.Kind != nil {
+ if sd.Kind != "" {
objectMap["kind"] = sd.Kind
}
if sd.Location != nil {
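Because Kind moves from *string to the new Kind enum, existing callers need a small adjustment. A hedged sketch of the new shape (Properties omitted for brevity):

package samples

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/healthcareapis/mgmt/2018-08-20-preview/healthcareapis"
	"github.com/Azure/go-autorest/autorest/to"
)

// describeService shows the Kind field now taking one of the enum constants
// instead of a *string.
func describeService() healthcareapis.ServicesDescription {
	// The accepted values can be enumerated at runtime.
	fmt.Println(healthcareapis.PossibleKindValues()) // [fhir fhir-R4 fhir-Stu3]
	return healthcareapis.ServicesDescription{
		Kind:     healthcareapis.FhirR4,
		Location: to.StringPtr("westus2"),
	}
}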
diff --git a/services/preview/logic/mgmt/2018-07-01-preview/logic/logicapi/interfaces.go b/services/preview/logic/mgmt/2018-07-01-preview/logic/logicapi/interfaces.go
index 4d949736cd29..5f33f0ecaf2e 100644
--- a/services/preview/logic/mgmt/2018-07-01-preview/logic/logicapi/interfaces.go
+++ b/services/preview/logic/mgmt/2018-07-01-preview/logic/logicapi/interfaces.go
@@ -84,6 +84,7 @@ var _ WorkflowTriggerHistoriesClientAPI = (*logic.WorkflowTriggerHistoriesClient
// WorkflowRunsClientAPI contains the set of methods on the WorkflowRunsClient type.
type WorkflowRunsClientAPI interface {
Cancel(ctx context.Context, resourceGroupName string, workflowName string, runName string) (result autorest.Response, err error)
+ Delete(ctx context.Context, resourceGroupName string, workflowName string, runName string) (result autorest.Response, err error)
Get(ctx context.Context, resourceGroupName string, workflowName string, runName string) (result logic.WorkflowRun, err error)
List(ctx context.Context, resourceGroupName string, workflowName string, top *int32, filter string) (result logic.WorkflowRunListResultPage, err error)
}
diff --git a/services/preview/logic/mgmt/2018-07-01-preview/logic/models.go b/services/preview/logic/mgmt/2018-07-01-preview/logic/models.go
index 2ff199c0d4d1..944db3a1b637 100644
--- a/services/preview/logic/mgmt/2018-07-01-preview/logic/models.go
+++ b/services/preview/logic/mgmt/2018-07-01-preview/logic/models.go
@@ -4288,7 +4288,7 @@ type WorkflowProperties struct {
Sku *Sku `json:"sku,omitempty"`
// IntegrationAccount - The integration account.
IntegrationAccount *ResourceReference `json:"integrationAccount,omitempty"`
- // Definition - The definition.
+ // Definition - The definition. See [Schema reference for Workflow Definition Language in Azure Logic Apps](https://aka.ms/logic-apps-workflow-definition-language).
Definition interface{} `json:"definition,omitempty"`
// Parameters - The parameters.
Parameters map[string]*WorkflowParameter `json:"parameters"`
diff --git a/services/preview/logic/mgmt/2018-07-01-preview/logic/workflowruns.go b/services/preview/logic/mgmt/2018-07-01-preview/logic/workflowruns.go
index a63a17802895..282571eb2b2e 100644
--- a/services/preview/logic/mgmt/2018-07-01-preview/logic/workflowruns.go
+++ b/services/preview/logic/mgmt/2018-07-01-preview/logic/workflowruns.go
@@ -118,6 +118,84 @@ func (client WorkflowRunsClient) CancelResponder(resp *http.Response) (result au
return
}
+// Delete deletes a workflow run.
+// Parameters:
+// resourceGroupName - the resource group name.
+// workflowName - the workflow name.
+// runName - the workflow run name.
+func (client WorkflowRunsClient) Delete(ctx context.Context, resourceGroupName string, workflowName string, runName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/WorkflowRunsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, workflowName, runName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.WorkflowRunsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "logic.WorkflowRunsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "logic.WorkflowRunsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client WorkflowRunsClient) DeletePreparer(ctx context.Context, resourceGroupName string, workflowName string, runName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "runName": autorest.Encode("path", runName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workflowName": autorest.Encode("path", workflowName),
+ }
+
+ const APIVersion = "2018-07-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkflowRunsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client WorkflowRunsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
// Get gets a workflow run.
// Parameters:
// resourceGroupName - the resource group name.
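A brief usage sketch for the new Delete operation, assuming an authorizer is already in hand and that the package's existing NewWorkflowRunsClient constructor is used; the run name would typically come from a prior List call.

package samples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/logic/mgmt/2018-07-01-preview/logic"
	"github.com/Azure/go-autorest/autorest"
)

// deleteWorkflowRun removes a single workflow run; the service responds with
// 200 OK or 204 No Content.
func deleteWorkflowRun(ctx context.Context, authorizer autorest.Authorizer, subscriptionID, resourceGroup, workflowName, runName string) error {
	client := logic.NewWorkflowRunsClient(subscriptionID)
	client.Authorizer = authorizer
	_, err := client.Delete(ctx, resourceGroup, workflowName, runName)
	return err
}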
diff --git a/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/client.go b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/client.go
new file mode 100644
index 000000000000..a04580361ca7
--- /dev/null
+++ b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/client.go
@@ -0,0 +1,53 @@
+// Package managednetwork implements the Azure ARM Managednetwork service API version 2019-06-01-preview.
+//
+// The Microsoft Azure Managed Network management API provides a RESTful set of web services that interact with the
+// Microsoft Azure Networks service to programmatically view, control, change, and monitor your entire Azure network
+// centrally and with ease.
+package managednetwork
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Managednetwork
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Managednetwork.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
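A short construction sketch for the new package's BaseClient: New targets DefaultBaseURI, while NewWithBaseURI can point at a sovereign-cloud or test endpoint. The sketch assumes credentials come from the environment via go-autorest's auth helper; the subscription ID is supplied by the caller.

```go
package managednetworkexamples

import (
	"github.com/Azure/azure-sdk-for-go/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

// newBaseClient builds the shared BaseClient against the public endpoint.
// Credentials are assumed to come from AZURE_* environment variables.
func newBaseClient(subscriptionID string) (managednetwork.BaseClient, error) {
	client := managednetwork.New(subscriptionID) // uses DefaultBaseURI: https://management.azure.com

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		return managednetwork.BaseClient{}, err
	}
	client.Authorizer = authorizer
	return client, nil
}
```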
diff --git a/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/groups.go b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/groups.go
new file mode 100644
index 000000000000..3dc627a19697
--- /dev/null
+++ b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/groups.go
@@ -0,0 +1,420 @@
+package managednetwork
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// GroupsClient is the client for the Microsoft Azure Managed Network management API, which provides a RESTful set of
+// web services that interact with the Microsoft Azure Networks service to programmatically view, control, change,
+// and monitor your entire Azure network centrally and with ease.
+type GroupsClient struct {
+ BaseClient
+}
+
+// NewGroupsClient creates an instance of the GroupsClient client.
+func NewGroupsClient(subscriptionID string) GroupsClient {
+ return NewGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client.
+func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient {
+ return GroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put ManagedNetworkGroups operation creates or updates a Managed Network Group resource
+// Parameters:
+// managedNetworkGroup - parameters supplied to create or update a Managed Network Group resource
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+// managedNetworkGroupName - the name of the Managed Network Group.
+func (client GroupsClient) CreateOrUpdate(ctx context.Context, managedNetworkGroup Group, resourceGroupName string, managedNetworkName string, managedNetworkGroupName string) (result GroupsCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdatePreparer(ctx, managedNetworkGroup, resourceGroupName, managedNetworkName, managedNetworkGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client GroupsClient) CreateOrUpdatePreparer(ctx context.Context, managedNetworkGroup Group, resourceGroupName string, managedNetworkName string, managedNetworkGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkGroupName": autorest.Encode("path", managedNetworkGroupName),
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkGroups/{managedNetworkGroupName}", pathParameters),
+ autorest.WithJSON(managedNetworkGroup),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) CreateOrUpdateSender(req *http.Request) (future GroupsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) CreateOrUpdateResponder(resp *http.Response) (result Group, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete the Delete ManagedNetworkGroups operation deletes a Managed Network Group specified by the resource group,
+// Managed Network name, and group name
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+// managedNetworkGroupName - the name of the Managed Network Group.
+func (client GroupsClient) Delete(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkGroupName string) (result GroupsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, managedNetworkName, managedNetworkGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client GroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkGroupName": autorest.Encode("path", managedNetworkGroupName),
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkGroups/{managedNetworkGroupName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) DeleteSender(req *http.Request) (future GroupsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get the Get ManagedNetworkGroups operation gets a Managed Network Group specified by the resource group, Managed
+// Network name, and group name
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+// managedNetworkGroupName - the name of the Managed Network Group.
+func (client GroupsClient) Get(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkGroupName string) (result Group, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, managedNetworkName, managedNetworkGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client GroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkGroupName": autorest.Encode("path", managedNetworkGroupName),
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkGroups/{managedNetworkGroupName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) GetResponder(resp *http.Response) (result Group, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByManagedNetwork the ListByManagedNetwork ManagedNetworkGroup operation retrieves all the Managed Network Groups
+// in a specified Managed Network in a paginated format.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+// top - may be used to limit the number of results in a page for list queries.
+// skiptoken - skiptoken is only used if a previous operation returned a partial result. If a previous response
+// contains a nextLink element, the value of the nextLink element will include a skiptoken parameter that
+// specifies a starting point to use for subsequent calls.
+func (client GroupsClient) ListByManagedNetwork(ctx context.Context, resourceGroupName string, managedNetworkName string, top *int32, skiptoken string) (result GroupListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.ListByManagedNetwork")
+ defer func() {
+ sc := -1
+ if result.glr.Response.Response != nil {
+ sc = result.glr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: top,
+ Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMaximum, Rule: int64(20), Chain: nil},
+ {Target: "top", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("managednetwork.GroupsClient", "ListByManagedNetwork", err.Error())
+ }
+
+ result.fn = client.listByManagedNetworkNextResults
+ req, err := client.ListByManagedNetworkPreparer(ctx, resourceGroupName, managedNetworkName, top, skiptoken)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "ListByManagedNetwork", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByManagedNetworkSender(req)
+ if err != nil {
+ result.glr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "ListByManagedNetwork", resp, "Failure sending request")
+ return
+ }
+
+ result.glr, err = client.ListByManagedNetworkResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "ListByManagedNetwork", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByManagedNetworkPreparer prepares the ListByManagedNetwork request.
+func (client GroupsClient) ListByManagedNetworkPreparer(ctx context.Context, resourceGroupName string, managedNetworkName string, top *int32, skiptoken string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+ if len(skiptoken) > 0 {
+ queryParameters["$skiptoken"] = autorest.Encode("query", skiptoken)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkGroups", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByManagedNetworkSender sends the ListByManagedNetwork request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) ListByManagedNetworkSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByManagedNetworkResponder handles the response to the ListByManagedNetwork request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) ListByManagedNetworkResponder(resp *http.Response) (result GroupListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByManagedNetworkNextResults retrieves the next set of results, if any.
+func (client GroupsClient) listByManagedNetworkNextResults(ctx context.Context, lastResults GroupListResult) (result GroupListResult, err error) {
+ req, err := lastResults.groupListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "listByManagedNetworkNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByManagedNetworkSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "listByManagedNetworkNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByManagedNetworkResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsClient", "listByManagedNetworkNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByManagedNetworkComplete enumerates all values, automatically crossing page boundaries as required.
+func (client GroupsClient) ListByManagedNetworkComplete(ctx context.Context, resourceGroupName string, managedNetworkName string, top *int32, skiptoken string) (result GroupListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.ListByManagedNetwork")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByManagedNetwork(ctx, resourceGroupName, managedNetworkName, top, skiptoken)
+ return
+}
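GroupsClient.CreateOrUpdate and Delete return futures for long-running operations, and ListByManagedNetworkComplete returns an iterator that crosses page boundaries automatically. The sketch below shows both patterns; it assumes azure.Future's WaitForCompletionRef and the iterator's NotDone/Value helpers, which are the standard autorest and generated helpers but are not all shown in this diff, and every resource name is a placeholder.

```go
package managednetworkexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork"
)

// createGroupAndList sketches the long-running create plus paged listing.
// Group contents and resource names are placeholders.
func createGroupAndList(ctx context.Context, client managednetwork.GroupsClient) error {
	group := managednetwork.Group{Kind: managednetwork.Connectivity}

	// CreateOrUpdate returns a future; block until the service reports a terminal state.
	future, err := client.CreateOrUpdate(ctx, group, "my-rg", "my-mn", "my-group")
	if err != nil {
		return err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	// Decode the final payload with the generated responder.
	if _, err := client.CreateOrUpdateResponder(future.Response()); err != nil {
		return err
	}

	// Walk every page; the iterator follows nextLink on its own.
	iter, err := client.ListByManagedNetworkComplete(ctx, "my-rg", "my-mn", nil, "")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		_ = iter.Value() // each value is a managednetwork.Group
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```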
diff --git a/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/managednetworkapi/interfaces.go b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/managednetworkapi/interfaces.go
new file mode 100644
index 000000000000..a2380cd02d4c
--- /dev/null
+++ b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/managednetworkapi/interfaces.go
@@ -0,0 +1,73 @@
+package managednetworkapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork"
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// ManagedNetworksClientAPI contains the set of methods on the ManagedNetworksClient type.
+type ManagedNetworksClientAPI interface {
+ CreateOrUpdate(ctx context.Context, managedNetwork managednetwork.ManagedNetwork, resourceGroupName string, managedNetworkName string) (result managednetwork.ManagedNetwork, err error)
+ Delete(ctx context.Context, resourceGroupName string, managedNetworkName string) (result managednetwork.ManagedNetworksDeleteFutureType, err error)
+ Get(ctx context.Context, resourceGroupName string, managedNetworkName string) (result managednetwork.ManagedNetwork, err error)
+ ListByResourceGroup(ctx context.Context, resourceGroupName string, top *int32, skiptoken string) (result managednetwork.ListResultPage, err error)
+ ListBySubscription(ctx context.Context, top *int32, skiptoken string) (result managednetwork.ListResultPage, err error)
+ Update(ctx context.Context, parameters managednetwork.Update, resourceGroupName string, managedNetworkName string) (result managednetwork.ManagedNetworksUpdateFutureType, err error)
+}
+
+var _ ManagedNetworksClientAPI = (*managednetwork.ManagedNetworksClient)(nil)
+
+// ScopeAssignmentsClientAPI contains the set of methods on the ScopeAssignmentsClient type.
+type ScopeAssignmentsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, parameters managednetwork.ScopeAssignment, scope string, scopeAssignmentName string) (result managednetwork.ScopeAssignment, err error)
+ Delete(ctx context.Context, scope string, scopeAssignmentName string) (result autorest.Response, err error)
+ Get(ctx context.Context, scope string, scopeAssignmentName string) (result managednetwork.ScopeAssignment, err error)
+ List(ctx context.Context, scope string) (result managednetwork.ScopeAssignmentListResultPage, err error)
+}
+
+var _ ScopeAssignmentsClientAPI = (*managednetwork.ScopeAssignmentsClient)(nil)
+
+// GroupsClientAPI contains the set of methods on the GroupsClient type.
+type GroupsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, managedNetworkGroup managednetwork.Group, resourceGroupName string, managedNetworkName string, managedNetworkGroupName string) (result managednetwork.GroupsCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkGroupName string) (result managednetwork.GroupsDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkGroupName string) (result managednetwork.Group, err error)
+ ListByManagedNetwork(ctx context.Context, resourceGroupName string, managedNetworkName string, top *int32, skiptoken string) (result managednetwork.GroupListResultPage, err error)
+}
+
+var _ GroupsClientAPI = (*managednetwork.GroupsClient)(nil)
+
+// PeeringPoliciesClientAPI contains the set of methods on the PeeringPoliciesClient type.
+type PeeringPoliciesClientAPI interface {
+ CreateOrUpdate(ctx context.Context, managedNetworkPolicy managednetwork.PeeringPolicy, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string) (result managednetwork.PeeringPoliciesCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string) (result managednetwork.PeeringPoliciesDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string) (result managednetwork.PeeringPolicy, err error)
+ ListByManagedNetwork(ctx context.Context, resourceGroupName string, managedNetworkName string, top *int32, skiptoken string) (result managednetwork.PeeringPolicyListResultPage, err error)
+}
+
+var _ PeeringPoliciesClientAPI = (*managednetwork.PeeringPoliciesClient)(nil)
+
+// OperationsClientAPI contains the set of methods on the OperationsClient type.
+type OperationsClientAPI interface {
+ List(ctx context.Context) (result managednetwork.OperationListResultPage, err error)
+}
+
+var _ OperationsClientAPI = (*managednetwork.OperationsClient)(nil)
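The managednetworkapi package exposes one interface per operation client, with compile-time assertions that the concrete clients satisfy them. A small sketch of programming against GroupsClientAPI so that a fake implementation can be substituted in tests:

```go
package managednetworkexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork"
	"github.com/Azure/azure-sdk-for-go/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/managednetworkapi"
)

// getGroup depends only on the GroupsClientAPI interface, so tests can pass a
// fake GroupsClientAPI instead of the real GroupsClient. Names are placeholders.
func getGroup(ctx context.Context, groups managednetworkapi.GroupsClientAPI, rg, mn, name string) (managednetwork.Group, error) {
	return groups.Get(ctx, rg, mn, name)
}
```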
diff --git a/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/managednetworks.go b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/managednetworks.go
new file mode 100644
index 000000000000..c8f386b685fb
--- /dev/null
+++ b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/managednetworks.go
@@ -0,0 +1,624 @@
+package managednetwork
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ManagedNetworksClient is the client for the Microsoft Azure Managed Network management API, which provides a
+// RESTful set of web services that interact with the Microsoft Azure Networks service to programmatically view,
+// control, change, and monitor your entire Azure network centrally and with ease.
+type ManagedNetworksClient struct {
+ BaseClient
+}
+
+// NewManagedNetworksClient creates an instance of the ManagedNetworksClient client.
+func NewManagedNetworksClient(subscriptionID string) ManagedNetworksClient {
+ return NewManagedNetworksClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewManagedNetworksClientWithBaseURI creates an instance of the ManagedNetworksClient client.
+func NewManagedNetworksClientWithBaseURI(baseURI string, subscriptionID string) ManagedNetworksClient {
+ return ManagedNetworksClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put ManagedNetworks operation creates/updates a Managed Network Resource, specified by resource
+// group and Managed Network name
+// Parameters:
+// managedNetwork - parameters supplied to create or update a Managed Network Resource
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+func (client ManagedNetworksClient) CreateOrUpdate(ctx context.Context, managedNetwork ManagedNetwork, resourceGroupName string, managedNetworkName string) (result ManagedNetwork, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedNetworksClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdatePreparer(ctx, managedNetwork, resourceGroupName, managedNetworkName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ManagedNetworksClient) CreateOrUpdatePreparer(ctx context.Context, managedNetwork ManagedNetwork, resourceGroupName string, managedNetworkName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}", pathParameters),
+ autorest.WithJSON(managedNetwork),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedNetworksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ManagedNetworksClient) CreateOrUpdateResponder(resp *http.Response) (result ManagedNetwork, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete the Delete ManagedNetworks operation deletes a Managed Network Resource, specified by the resource group and
+// Managed Network name
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+func (client ManagedNetworksClient) Delete(ctx context.Context, resourceGroupName string, managedNetworkName string) (result ManagedNetworksDeleteFutureType, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedNetworksClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, managedNetworkName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ManagedNetworksClient) DeletePreparer(ctx context.Context, resourceGroupName string, managedNetworkName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedNetworksClient) DeleteSender(req *http.Request) (future ManagedNetworksDeleteFutureType, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ManagedNetworksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get the Get ManagedNetworks operation gets a Managed Network Resource, specified by the resource group and Managed
+// Network name
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+func (client ManagedNetworksClient) Get(ctx context.Context, resourceGroupName string, managedNetworkName string) (result ManagedNetwork, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedNetworksClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, managedNetworkName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ManagedNetworksClient) GetPreparer(ctx context.Context, resourceGroupName string, managedNetworkName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedNetworksClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ManagedNetworksClient) GetResponder(resp *http.Response) (result ManagedNetwork, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByResourceGroup the ListByResourceGroup ManagedNetwork operation retrieves all the Managed Network resources in
+// a resource group in a paginated format.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// top - may be used to limit the number of results in a page for list queries.
+// skiptoken - skiptoken is only used if a previous operation returned a partial result. If a previous response
+// contains a nextLink element, the value of the nextLink element will include a skiptoken parameter that
+// specifies a starting point to use for subsequent calls.
+func (client ManagedNetworksClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, top *int32, skiptoken string) (result ListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedNetworksClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.lr.Response.Response != nil {
+ sc = result.lr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: top,
+ Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMaximum, Rule: int64(20), Chain: nil},
+ {Target: "top", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("managednetwork.ManagedNetworksClient", "ListByResourceGroup", err.Error())
+ }
+
+ result.fn = client.listByResourceGroupNextResults
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, top, skiptoken)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.lr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.lr, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client ManagedNetworksClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, top *int32, skiptoken string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+ if len(skiptoken) > 0 {
+ queryParameters["$skiptoken"] = autorest.Encode("query", skiptoken)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedNetworksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client ManagedNetworksClient) ListByResourceGroupResponder(resp *http.Response) (result ListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client ManagedNetworksClient) listByResourceGroupNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) {
+ req, err := lastResults.listResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ManagedNetworksClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, top *int32, skiptoken string) (result ListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedNetworksClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, top, skiptoken)
+ return
+}
+
+// ListBySubscription the ListBySubscription ManagedNetwork operation retrieves all the Managed Network Resources in
+// the current subscription in a paginated format.
+// Parameters:
+// top - may be used to limit the number of results in a page for list queries.
+// skiptoken - skiptoken is only used if a previous operation returned a partial result. If a previous response
+// contains a nextLink element, the value of the nextLink element will include a skiptoken parameter that
+// specifies a starting point to use for subsequent calls.
+func (client ManagedNetworksClient) ListBySubscription(ctx context.Context, top *int32, skiptoken string) (result ListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedNetworksClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.lr.Response.Response != nil {
+ sc = result.lr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: top,
+ Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMaximum, Rule: int64(20), Chain: nil},
+ {Target: "top", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("managednetwork.ManagedNetworksClient", "ListBySubscription", err.Error())
+ }
+
+ result.fn = client.listBySubscriptionNextResults
+ req, err := client.ListBySubscriptionPreparer(ctx, top, skiptoken)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "ListBySubscription", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.lr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "ListBySubscription", resp, "Failure sending request")
+ return
+ }
+
+ result.lr, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "ListBySubscription", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListBySubscriptionPreparer prepares the ListBySubscription request.
+func (client ManagedNetworksClient) ListBySubscriptionPreparer(ctx context.Context, top *int32, skiptoken string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+ if len(skiptoken) > 0 {
+ queryParameters["$skiptoken"] = autorest.Encode("query", skiptoken)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ManagedNetwork/managedNetworks", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedNetworksClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
+// closes the http.Response Body.
+func (client ManagedNetworksClient) ListBySubscriptionResponder(resp *http.Response) (result ListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listBySubscriptionNextResults retrieves the next set of results, if any.
+func (client ManagedNetworksClient) listBySubscriptionNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) {
+ req, err := lastResults.listResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ManagedNetworksClient) ListBySubscriptionComplete(ctx context.Context, top *int32, skiptoken string) (result ListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedNetworksClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListBySubscription(ctx, top, skiptoken)
+ return
+}
+
+// Update updates the specified Managed Network resource tags.
+// Parameters:
+// parameters - parameters supplied to update the Managed Network resource tags and/or scope.
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+func (client ManagedNetworksClient) Update(ctx context.Context, parameters Update, resourceGroupName string, managedNetworkName string) (result ManagedNetworksUpdateFutureType, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedNetworksClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdatePreparer(ctx, parameters, resourceGroupName, managedNetworkName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.UpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksClient", "Update", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client ManagedNetworksClient) UpdatePreparer(ctx context.Context, parameters Update, resourceGroupName string, managedNetworkName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedNetworksClient) UpdateSender(req *http.Request) (future ManagedNetworksUpdateFutureType, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client ManagedNetworksClient) UpdateResponder(resp *http.Response) (result ManagedNetwork, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
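ManagedNetworksClient.Delete and Update likewise return future types. A minimal delete-and-wait sketch, assuming azure.Future's standard WaitForCompletionRef helper; the resource names are placeholders.

```go
package managednetworkexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork"
)

// deleteManagedNetwork issues the long-running delete and blocks until the
// service reports a terminal state. Resource names are placeholders.
func deleteManagedNetwork(ctx context.Context, client managednetwork.ManagedNetworksClient) error {
	future, err := client.Delete(ctx, "my-rg", "my-mn")
	if err != nil {
		return err
	}
	// Poll the operation until it completes (200, 202, or 204 handling is done by the responder).
	return future.WaitForCompletionRef(ctx, client.Client)
}
```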
diff --git a/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/models.go b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/models.go
new file mode 100644
index 000000000000..5b1fc536f0d2
--- /dev/null
+++ b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/models.go
@@ -0,0 +1,1430 @@
+package managednetwork
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork"
+
+// Kind enumerates the values for kind.
+type Kind string
+
+const (
+ // Connectivity ...
+ Connectivity Kind = "Connectivity"
+)
+
+// PossibleKindValues returns an array of possible values for the Kind const type.
+func PossibleKindValues() []Kind {
+ return []Kind{Connectivity}
+}
+
+// ProvisioningState enumerates the values for provisioning state.
+type ProvisioningState string
+
+const (
+ // Deleting ...
+ Deleting ProvisioningState = "Deleting"
+ // Failed ...
+ Failed ProvisioningState = "Failed"
+ // Succeeded ...
+ Succeeded ProvisioningState = "Succeeded"
+ // Updating ...
+ Updating ProvisioningState = "Updating"
+)
+
+// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type.
+func PossibleProvisioningStateValues() []ProvisioningState {
+ return []ProvisioningState{Deleting, Failed, Succeeded, Updating}
+}
+
+// Type enumerates the values for type.
+type Type string
+
+const (
+ // HubAndSpokeTopology ...
+ HubAndSpokeTopology Type = "HubAndSpokeTopology"
+ // MeshTopology ...
+ MeshTopology Type = "MeshTopology"
+)
+
+// PossibleTypeValues returns an array of possible values for the Type const type.
+func PossibleTypeValues() []Type {
+ return []Type{HubAndSpokeTopology, MeshTopology}
+}
+
+// ConnectivityCollection the collection of Connectivity related groups and policies within the Managed
+// Network
+type ConnectivityCollection struct {
+ // Groups - READ-ONLY; The collection of connectivity related Managed Network Groups within the Managed Network
+ Groups *[]Group `json:"groups,omitempty"`
+ // Peerings - READ-ONLY; The collection of Managed Network Peering Policies within the Managed Network
+ Peerings *[]PeeringPolicy `json:"peerings,omitempty"`
+}
+
+// ErrorResponse the error response that indicates why an operation has failed.
+type ErrorResponse struct {
+ // Code - READ-ONLY; The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; The error message.
+ Message *string `json:"message,omitempty"`
+}
+
+// Group the Managed Network Group resource
+type Group struct {
+ autorest.Response `json:"-"`
+ // GroupProperties - Gets or sets the properties of a network group
+ *GroupProperties `json:"properties,omitempty"`
+ // Kind - Responsibility role under which this Managed Network Group will be created. Possible values include: 'Connectivity'
+ Kind Kind `json:"kind,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Group.
+func (g Group) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if g.GroupProperties != nil {
+ objectMap["properties"] = g.GroupProperties
+ }
+ if g.Kind != "" {
+ objectMap["kind"] = g.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Group struct.
+func (g *Group) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var groupProperties GroupProperties
+ err = json.Unmarshal(*v, &groupProperties)
+ if err != nil {
+ return err
+ }
+ g.GroupProperties = &groupProperties
+ }
+ case "kind":
+ if v != nil {
+ var kind Kind
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ g.Kind = kind
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ g.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ g.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ g.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// GroupListResult result of the request to list Managed Network Groups. It contains a list of groups and a
+// URL link to get the next set of results.
+type GroupListResult struct {
+ autorest.Response `json:"-"`
+ // Value - Gets a page of ManagedNetworkGroup
+ Value *[]Group `json:"value,omitempty"`
+ // NextLink - Gets the URL to get the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// GroupListResultIterator provides access to a complete listing of Group values.
+type GroupListResultIterator struct {
+ i int
+ page GroupListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *GroupListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GroupListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *GroupListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter GroupListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter GroupListResultIterator) Response() GroupListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter GroupListResultIterator) Value() Group {
+ if !iter.page.NotDone() {
+ return Group{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewGroupListResultIterator creates a new instance of the GroupListResultIterator type.
+func NewGroupListResultIterator(page GroupListResultPage) GroupListResultIterator {
+ return GroupListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (glr GroupListResult) IsEmpty() bool {
+ return glr.Value == nil || len(*glr.Value) == 0
+}
+
+// groupListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (glr GroupListResult) groupListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if glr.NextLink == nil || len(to.String(glr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(glr.NextLink)))
+}
+
+// GroupListResultPage contains a page of Group values.
+type GroupListResultPage struct {
+ fn func(context.Context, GroupListResult) (GroupListResult, error)
+ glr GroupListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *GroupListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/GroupListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.glr)
+ if err != nil {
+ return err
+ }
+ page.glr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *GroupListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page GroupListResultPage) NotDone() bool {
+ return !page.glr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page GroupListResultPage) Response() GroupListResult {
+ return page.glr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page GroupListResultPage) Values() []Group {
+ if page.glr.IsEmpty() {
+ return nil
+ }
+ return *page.glr.Value
+}
+
+// NewGroupListResultPage creates a new instance of the GroupListResultPage type.
+func NewGroupListResultPage(getNextPage func(context.Context, GroupListResult) (GroupListResult, error)) GroupListResultPage {
+ return GroupListResultPage{fn: getNextPage}
+}
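+
+// exampleIterateGroups is an illustrative sketch, not generated code: it walks
+// every Group across pages using the pager defined above. It assumes the
+// caller already obtained a GroupListResultPage from a GroupsClient list call;
+// the helper name is hypothetical.
+func exampleIterateGroups(ctx context.Context, page GroupListResultPage) ([]Group, error) {
+	var all []Group
+	for page.NotDone() {
+		// Collect the current page, then fetch the next one via NextLink.
+		all = append(all, page.Values()...)
+		if err := page.NextWithContext(ctx); err != nil {
+			return all, err
+		}
+	}
+	return all, nil
+}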
+
+// GroupProperties properties of a Managed Network Group
+type GroupProperties struct {
+ // ManagementGroups - The collection of management groups covered by the Managed Network
+ ManagementGroups *[]ResourceID `json:"managementGroups,omitempty"`
+ // Subscriptions - The collection of subscriptions covered by the Managed Network
+ Subscriptions *[]ResourceID `json:"subscriptions,omitempty"`
+	// VirtualNetworks - The collection of virtual networks covered by the Managed Network
+ VirtualNetworks *[]ResourceID `json:"virtualNetworks,omitempty"`
+ // Subnets - The collection of subnets covered by the Managed Network
+ Subnets *[]ResourceID `json:"subnets,omitempty"`
+ // ProvisioningState - READ-ONLY; Provisioning state of the ManagedNetwork resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. Possible values include: 'Updating', 'Deleting', 'Failed', 'Succeeded'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// GroupsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type GroupsCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *GroupsCreateOrUpdateFuture) Result(client GroupsClient) (g Group, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("managednetwork.GroupsCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent {
+ g, err = client.CreateOrUpdateResponder(g.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
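+
+// exampleWaitForGroup is an illustrative sketch, not generated code: it shows
+// the usual pattern for consuming the future above, i.e. block until the
+// long-running operation reaches a terminal state, then decode the final
+// Group. The helper name is hypothetical; the caller is assumed to have
+// obtained the future from GroupsClient.CreateOrUpdate.
+func exampleWaitForGroup(ctx context.Context, client GroupsClient, future GroupsCreateOrUpdateFuture) (Group, error) {
+	// WaitForCompletionRef polls the operation using the client's retry settings.
+	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+		return Group{}, err
+	}
+	// Result re-checks completion and unmarshals the final response body.
+	return future.Result(client)
+}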
+
+// GroupsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
+type GroupsDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *GroupsDeleteFuture) Result(client GroupsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.GroupsDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("managednetwork.GroupsDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// HubAndSpokePeeringPolicyProperties properties of a Hub and Spoke Peering Policy
+type HubAndSpokePeeringPolicyProperties struct {
+ // Type - Gets or sets the connectivity type of a network structure policy. Possible values include: 'HubAndSpokeTopology', 'MeshTopology'
+ Type Type `json:"type,omitempty"`
+ // Hub - Gets or sets the hub virtual network ID
+ Hub *ResourceID `json:"hub,omitempty"`
+ // Spokes - Gets or sets the spokes group IDs
+ Spokes *[]ResourceID `json:"spokes,omitempty"`
+ // Mesh - Gets or sets the mesh group IDs
+ Mesh *[]ResourceID `json:"mesh,omitempty"`
+ // ProvisioningState - READ-ONLY; Provisioning state of the ManagedNetwork resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. Possible values include: 'Updating', 'Deleting', 'Failed', 'Succeeded'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// ListResult result of the request to list Managed Networks. It contains a list of Managed Networks and a
+// URL link to get the next set of results.
+type ListResult struct {
+ autorest.Response `json:"-"`
+ // Value - Gets a page of ManagedNetworks
+ Value *[]ManagedNetwork `json:"value,omitempty"`
+ // NextLink - Gets the URL to get the next page of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ListResultIterator provides access to a complete listing of ManagedNetwork values.
+type ListResultIterator struct {
+ i int
+ page ListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListResultIterator) Response() ListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListResultIterator) Value() ManagedNetwork {
+ if !iter.page.NotDone() {
+ return ManagedNetwork{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewListResultIterator creates a new instance of the ListResultIterator type.
+func NewListResultIterator(page ListResultPage) ListResultIterator {
+ return ListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (lr ListResult) IsEmpty() bool {
+ return lr.Value == nil || len(*lr.Value) == 0
+}
+
+// listResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lr ListResult) listResultPreparer(ctx context.Context) (*http.Request, error) {
+ if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lr.NextLink)))
+}
+
+// ListResultPage contains a page of ManagedNetwork values.
+type ListResultPage struct {
+ fn func(context.Context, ListResult) (ListResult, error)
+ lr ListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.lr)
+ if err != nil {
+ return err
+ }
+ page.lr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListResultPage) NotDone() bool {
+ return !page.lr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListResultPage) Response() ListResult {
+ return page.lr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListResultPage) Values() []ManagedNetwork {
+ if page.lr.IsEmpty() {
+ return nil
+ }
+ return *page.lr.Value
+}
+
+// NewListResultPage creates a new instance of the ListResultPage type.
+func NewListResultPage(getNextPage func(context.Context, ListResult) (ListResult, error)) ListResultPage {
+ return ListResultPage{fn: getNextPage}
+}
+
+// ManagedNetwork the Managed Network resource
+type ManagedNetwork struct {
+ autorest.Response `json:"-"`
+ // Properties - The MNC properties
+ *Properties `json:"properties,omitempty"`
+ // Tags - Resource tags.
+ Tags *[]string `json:"tags,omitempty"`
+ // Location - The geo-location where the resource lives
+ Location *string `json:"location,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ManagedNetwork.
+func (mn ManagedNetwork) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mn.Properties != nil {
+ objectMap["properties"] = mn.Properties
+ }
+ if mn.Tags != nil {
+ objectMap["tags"] = mn.Tags
+ }
+ if mn.Location != nil {
+ objectMap["location"] = mn.Location
+ }
+ return json.Marshal(objectMap)
+}
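+
+// exampleNewManagedNetwork is an illustrative sketch, not generated code: it
+// builds a minimal request payload and shows which fields survive MarshalJSON
+// above (Properties, Tags and Location; the READ-ONLY ID/Name/Type fields are
+// dropped). The helper name is hypothetical and the scope contents are
+// placeholders, not real resource IDs.
+func exampleNewManagedNetwork(location string, subscriptionIDs []string) ManagedNetwork {
+	ids := make([]ResourceID, 0, len(subscriptionIDs))
+	for _, s := range subscriptionIDs {
+		ids = append(ids, ResourceID{ID: to.StringPtr(s)})
+	}
+	return ManagedNetwork{
+		Location: to.StringPtr(location),
+		Properties: &Properties{
+			Scope: &Scope{Subscriptions: &ids},
+		},
+	}
+}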
+
+// UnmarshalJSON is the custom unmarshaler for ManagedNetwork struct.
+func (mn *ManagedNetwork) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var properties Properties
+ err = json.Unmarshal(*v, &properties)
+ if err != nil {
+ return err
+ }
+ mn.Properties = &properties
+ }
+ case "tags":
+ if v != nil {
+ var tags []string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ mn.Tags = &tags
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ mn.Location = &location
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ mn.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mn.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ mn.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ManagedNetworksDeleteFutureType an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ManagedNetworksDeleteFutureType struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ManagedNetworksDeleteFutureType) Result(client ManagedNetworksClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksDeleteFutureType", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("managednetwork.ManagedNetworksDeleteFutureType")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// ManagedNetworksUpdateFutureType an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ManagedNetworksUpdateFutureType struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ManagedNetworksUpdateFutureType) Result(client ManagedNetworksClient) (mn ManagedNetwork, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksUpdateFutureType", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("managednetwork.ManagedNetworksUpdateFutureType")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if mn.Response.Response, err = future.GetResult(sender); err == nil && mn.Response.Response.StatusCode != http.StatusNoContent {
+ mn, err = client.UpdateResponder(mn.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ManagedNetworksUpdateFutureType", "Result", mn.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// MeshPeeringPolicyProperties properties of a Mesh Peering Policy
+type MeshPeeringPolicyProperties struct {
+ // Type - Gets or sets the connectivity type of a network structure policy. Possible values include: 'HubAndSpokeTopology', 'MeshTopology'
+ Type Type `json:"type,omitempty"`
+ // Hub - Gets or sets the hub virtual network ID
+ Hub *ResourceID `json:"hub,omitempty"`
+ // Spokes - Gets or sets the spokes group IDs
+ Spokes *[]ResourceID `json:"spokes,omitempty"`
+ // Mesh - Gets or sets the mesh group IDs
+ Mesh *[]ResourceID `json:"mesh,omitempty"`
+ // ProvisioningState - READ-ONLY; Provisioning state of the ManagedNetwork resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. Possible values include: 'Updating', 'Deleting', 'Failed', 'Succeeded'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// Operation REST API operation
+type Operation struct {
+ // Name - Operation name: {provider}/{resource}/{operation}
+ Name *string `json:"name,omitempty"`
+ // Display - The object that represents the operation.
+ Display *OperationDisplay `json:"display,omitempty"`
+}
+
+// OperationDisplay the object that represents the operation.
+type OperationDisplay struct {
+ // Provider - Service provider: Microsoft.ManagedNetwork
+ Provider *string `json:"provider,omitempty"`
+ // Resource - Resource on which the operation is performed: Profile, endpoint, etc.
+ Resource *string `json:"resource,omitempty"`
+ // Operation - Operation type: Read, write, delete, etc.
+ Operation *string `json:"operation,omitempty"`
+}
+
+// OperationListResult result of the request to list Managed Network operations. It contains a list of
+// operations and a URL link to get the next set of results.
+type OperationListResult struct {
+ autorest.Response `json:"-"`
+ // Value - List of Resource Provider operations supported by the Managed Network resource provider.
+ Value *[]Operation `json:"value,omitempty"`
+ // NextLink - URL to get the next set of operation list results if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// OperationListResultIterator provides access to a complete listing of Operation values.
+type OperationListResultIterator struct {
+ i int
+ page OperationListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *OperationListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *OperationListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter OperationListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter OperationListResultIterator) Response() OperationListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter OperationListResultIterator) Value() Operation {
+ if !iter.page.NotDone() {
+ return Operation{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewOperationListResultIterator creates a new instance of the OperationListResultIterator type.
+func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
+ return OperationListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (olr OperationListResult) IsEmpty() bool {
+ return olr.Value == nil || len(*olr.Value) == 0
+}
+
+// operationListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (olr OperationListResult) operationListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if olr.NextLink == nil || len(to.String(olr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(olr.NextLink)))
+}
+
+// OperationListResultPage contains a page of Operation values.
+type OperationListResultPage struct {
+ fn func(context.Context, OperationListResult) (OperationListResult, error)
+ olr OperationListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *OperationListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.olr)
+ if err != nil {
+ return err
+ }
+ page.olr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *OperationListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page OperationListResultPage) NotDone() bool {
+ return !page.olr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page OperationListResultPage) Response() OperationListResult {
+ return page.olr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page OperationListResultPage) Values() []Operation {
+ if page.olr.IsEmpty() {
+ return nil
+ }
+ return *page.olr.Value
+}
+
+// NewOperationListResultPage creates a new instance of the OperationListResultPage type.
+func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
+ return OperationListResultPage{fn: getNextPage}
+}
+
+// PeeringPoliciesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type PeeringPoliciesCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *PeeringPoliciesCreateOrUpdateFuture) Result(client PeeringPoliciesClient) (pp PeeringPolicy, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("managednetwork.PeeringPoliciesCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if pp.Response.Response, err = future.GetResult(sender); err == nil && pp.Response.Response.StatusCode != http.StatusNoContent {
+ pp, err = client.CreateOrUpdateResponder(pp.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesCreateOrUpdateFuture", "Result", pp.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// PeeringPoliciesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type PeeringPoliciesDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *PeeringPoliciesDeleteFuture) Result(client PeeringPoliciesClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("managednetwork.PeeringPoliciesDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// PeeringPolicy the Managed Network Peering Policy resource
+type PeeringPolicy struct {
+ autorest.Response `json:"-"`
+ // Properties - Gets or sets the properties of a Managed Network Policy
+ Properties *PeeringPolicyProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// PeeringPolicyListResult result of the request to list Managed Network Peering Policies. It contains a
+// list of policies and a URL link to get the next set of results.
+type PeeringPolicyListResult struct {
+ autorest.Response `json:"-"`
+ // Value - Gets a page of Peering Policies
+ Value *[]PeeringPolicy `json:"value,omitempty"`
+ // NextLink - Gets the URL to get the next page of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// PeeringPolicyListResultIterator provides access to a complete listing of PeeringPolicy values.
+type PeeringPolicyListResultIterator struct {
+ i int
+ page PeeringPolicyListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *PeeringPolicyListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringPolicyListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *PeeringPolicyListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter PeeringPolicyListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter PeeringPolicyListResultIterator) Response() PeeringPolicyListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter PeeringPolicyListResultIterator) Value() PeeringPolicy {
+ if !iter.page.NotDone() {
+ return PeeringPolicy{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewPeeringPolicyListResultIterator creates a new instance of the PeeringPolicyListResultIterator type.
+func NewPeeringPolicyListResultIterator(page PeeringPolicyListResultPage) PeeringPolicyListResultIterator {
+ return PeeringPolicyListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (pplr PeeringPolicyListResult) IsEmpty() bool {
+ return pplr.Value == nil || len(*pplr.Value) == 0
+}
+
+// peeringPolicyListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (pplr PeeringPolicyListResult) peeringPolicyListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if pplr.NextLink == nil || len(to.String(pplr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(pplr.NextLink)))
+}
+
+// PeeringPolicyListResultPage contains a page of PeeringPolicy values.
+type PeeringPolicyListResultPage struct {
+ fn func(context.Context, PeeringPolicyListResult) (PeeringPolicyListResult, error)
+ pplr PeeringPolicyListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *PeeringPolicyListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringPolicyListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.pplr)
+ if err != nil {
+ return err
+ }
+ page.pplr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *PeeringPolicyListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page PeeringPolicyListResultPage) NotDone() bool {
+ return !page.pplr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page PeeringPolicyListResultPage) Response() PeeringPolicyListResult {
+ return page.pplr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page PeeringPolicyListResultPage) Values() []PeeringPolicy {
+ if page.pplr.IsEmpty() {
+ return nil
+ }
+ return *page.pplr.Value
+}
+
+// NewPeeringPolicyListResultPage creates a new instance of the PeeringPolicyListResultPage type.
+func NewPeeringPolicyListResultPage(getNextPage func(context.Context, PeeringPolicyListResult) (PeeringPolicyListResult, error)) PeeringPolicyListResultPage {
+ return PeeringPolicyListResultPage{fn: getNextPage}
+}
+
+// PeeringPolicyProperties properties of a Managed Network Peering Policy
+type PeeringPolicyProperties struct {
+ // Type - Gets or sets the connectivity type of a network structure policy. Possible values include: 'HubAndSpokeTopology', 'MeshTopology'
+ Type Type `json:"type,omitempty"`
+ // Hub - Gets or sets the hub virtual network ID
+ Hub *ResourceID `json:"hub,omitempty"`
+ // Spokes - Gets or sets the spokes group IDs
+ Spokes *[]ResourceID `json:"spokes,omitempty"`
+ // Mesh - Gets or sets the mesh group IDs
+ Mesh *[]ResourceID `json:"mesh,omitempty"`
+ // ProvisioningState - READ-ONLY; Provisioning state of the ManagedNetwork resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. Possible values include: 'Updating', 'Deleting', 'Failed', 'Succeeded'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// Properties properties of a Managed Network
+type Properties struct {
+	// Scope - The collection of management groups, subscriptions, virtual networks, and subnets covered by the Managed Network. This is a read-only property that is reflective of all ScopeAssignments for this Managed Network
+ Scope *Scope `json:"scope,omitempty"`
+ // Connectivity - READ-ONLY; The collection of groups and policies concerned with connectivity
+ Connectivity *ConnectivityCollection `json:"connectivity,omitempty"`
+ // ProvisioningState - READ-ONLY; Provisioning state of the ManagedNetwork resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. Possible values include: 'Updating', 'Deleting', 'Failed', 'Succeeded'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// ProxyResource the resource model definition for an ARM proxy resource. It will have everything other than the
+// required location and tags
+type ProxyResource struct {
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// Resource the general resource model definition
+type Resource struct {
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// ResourceID generic pointer to a resource
+type ResourceID struct {
+ // ID - Resource Id
+ ID *string `json:"id,omitempty"`
+}
+
+// ResourceProperties base for resource properties.
+type ResourceProperties struct {
+ // ProvisioningState - READ-ONLY; Provisioning state of the ManagedNetwork resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. Possible values include: 'Updating', 'Deleting', 'Failed', 'Succeeded'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// Scope scope of a Managed Network
+type Scope struct {
+ // ManagementGroups - The collection of management groups covered by the Managed Network
+ ManagementGroups *[]ResourceID `json:"managementGroups,omitempty"`
+ // Subscriptions - The collection of subscriptions covered by the Managed Network
+ Subscriptions *[]ResourceID `json:"subscriptions,omitempty"`
+	// VirtualNetworks - The collection of virtual networks covered by the Managed Network
+ VirtualNetworks *[]ResourceID `json:"virtualNetworks,omitempty"`
+ // Subnets - The collection of subnets covered by the Managed Network
+ Subnets *[]ResourceID `json:"subnets,omitempty"`
+}
+
+// ScopeAssignment the Managed Network Scope Assignment resource
+type ScopeAssignment struct {
+ autorest.Response `json:"-"`
+ // ScopeAssignmentProperties - The Scope Assignment properties
+ *ScopeAssignmentProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ScopeAssignment.
+func (sa ScopeAssignment) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if sa.ScopeAssignmentProperties != nil {
+ objectMap["properties"] = sa.ScopeAssignmentProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ScopeAssignment struct.
+func (sa *ScopeAssignment) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var scopeAssignmentProperties ScopeAssignmentProperties
+ err = json.Unmarshal(*v, &scopeAssignmentProperties)
+ if err != nil {
+ return err
+ }
+ sa.ScopeAssignmentProperties = &scopeAssignmentProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ sa.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ sa.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sa.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ScopeAssignmentListResult result of the request to list ScopeAssignments. It contains a list of scope
+// assignments and a URL link to get the next set of results.
+type ScopeAssignmentListResult struct {
+ autorest.Response `json:"-"`
+ // Value - Gets a page of ScopeAssignment
+ Value *[]ScopeAssignment `json:"value,omitempty"`
+ // NextLink - Gets the URL to get the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ScopeAssignmentListResultIterator provides access to a complete listing of ScopeAssignment values.
+type ScopeAssignmentListResultIterator struct {
+ i int
+ page ScopeAssignmentListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ScopeAssignmentListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScopeAssignmentListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ScopeAssignmentListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ScopeAssignmentListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ScopeAssignmentListResultIterator) Response() ScopeAssignmentListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ScopeAssignmentListResultIterator) Value() ScopeAssignment {
+ if !iter.page.NotDone() {
+ return ScopeAssignment{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewScopeAssignmentListResultIterator creates a new instance of the ScopeAssignmentListResultIterator type.
+func NewScopeAssignmentListResultIterator(page ScopeAssignmentListResultPage) ScopeAssignmentListResultIterator {
+ return ScopeAssignmentListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (salr ScopeAssignmentListResult) IsEmpty() bool {
+ return salr.Value == nil || len(*salr.Value) == 0
+}
+
+// scopeAssignmentListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (salr ScopeAssignmentListResult) scopeAssignmentListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if salr.NextLink == nil || len(to.String(salr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(salr.NextLink)))
+}
+
+// ScopeAssignmentListResultPage contains a page of ScopeAssignment values.
+type ScopeAssignmentListResultPage struct {
+ fn func(context.Context, ScopeAssignmentListResult) (ScopeAssignmentListResult, error)
+ salr ScopeAssignmentListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ScopeAssignmentListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScopeAssignmentListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.salr)
+ if err != nil {
+ return err
+ }
+ page.salr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ScopeAssignmentListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ScopeAssignmentListResultPage) NotDone() bool {
+ return !page.salr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ScopeAssignmentListResultPage) Response() ScopeAssignmentListResult {
+ return page.salr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ScopeAssignmentListResultPage) Values() []ScopeAssignment {
+ if page.salr.IsEmpty() {
+ return nil
+ }
+ return *page.salr.Value
+}
+
+// NewScopeAssignmentListResultPage creates a new instance of the ScopeAssignmentListResultPage type.
+func NewScopeAssignmentListResultPage(getNextPage func(context.Context, ScopeAssignmentListResult) (ScopeAssignmentListResult, error)) ScopeAssignmentListResultPage {
+ return ScopeAssignmentListResultPage{fn: getNextPage}
+}
+
+// ScopeAssignmentProperties properties of a Scope Assignment
+type ScopeAssignmentProperties struct {
+	// AssignedManagedNetwork - The ID of the Managed Network that the scope will be assigned to.
+ AssignedManagedNetwork *string `json:"assignedManagedNetwork,omitempty"`
+ // ProvisioningState - READ-ONLY; Provisioning state of the ManagedNetwork resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. Possible values include: 'Updating', 'Deleting', 'Failed', 'Succeeded'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // Etag - READ-ONLY; A unique read-only string that changes whenever the resource is updated.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// TrackedResource the resource model definition for an ARM tracked top-level resource
+type TrackedResource struct {
+ // Tags - Resource tags.
+ Tags *[]string `json:"tags,omitempty"`
+ // Location - The geo-location where the resource lives
+ Location *string `json:"location,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// Update parameters supplied to update the tags of a Managed Network
+type Update struct {
+ // Tags - Updates the tags property of the Managed Network
+ Tags *[]string `json:"tags,omitempty"`
+}
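+
+// exampleTagsUpdate is an illustrative sketch, not generated code: it builds
+// the patch body used when only the tags of a Managed Network change, which
+// serializes as {"tags":[...]} per the Update type above. The helper name and
+// tag values are placeholders.
+func exampleTagsUpdate(tags ...string) Update {
+	return Update{Tags: &tags}
+}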
diff --git a/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/operations.go b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/operations.go
new file mode 100644
index 000000000000..4d5a22fb4962
--- /dev/null
+++ b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/operations.go
@@ -0,0 +1,149 @@
+package managednetwork
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// OperationsClient is the client for the Microsoft Azure Managed Network management API, which provides a RESTful
+// set of web services that interact with the Microsoft Azure Networks service to programmatically view, control,
+// change, and monitor your entire Azure network centrally and with ease.
+type OperationsClient struct {
+ BaseClient
+}
+
+// NewOperationsClient creates an instance of the OperationsClient client.
+func NewOperationsClient(subscriptionID string) OperationsClient {
+ return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+ return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists all of the available MNC operations.
+func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.olr.Response.Response != nil {
+ sc = result.olr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.OperationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.olr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.OperationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.olr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.OperationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.ManagedNetwork/operations"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationListResult) (result OperationListResult, err error) {
+ req, err := lastResults.operationListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "managednetwork.OperationsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "managednetwork.OperationsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.OperationsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
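+
+// exampleListOperations is an illustrative sketch, not generated code: it
+// consumes ListComplete above, letting the iterator cross page boundaries
+// transparently. The helper name is hypothetical, and the client is assumed
+// to already carry a configured Authorizer (obtained elsewhere, e.g. via a
+// go-autorest auth helper).
+func exampleListOperations(ctx context.Context, client OperationsClient) ([]Operation, error) {
+	var ops []Operation
+	iter, err := client.ListComplete(ctx)
+	if err != nil {
+		return nil, err
+	}
+	for iter.NotDone() {
+		ops = append(ops, iter.Value())
+		if err := iter.NextWithContext(ctx); err != nil {
+			return ops, err
+		}
+	}
+	return ops, nil
+}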
diff --git a/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/peeringpolicies.go b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/peeringpolicies.go
new file mode 100644
index 000000000000..a4281b3da967
--- /dev/null
+++ b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/peeringpolicies.go
@@ -0,0 +1,420 @@
+package managednetwork
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// PeeringPoliciesClient is the client for the Microsoft Azure Managed Network management API, which provides a
+// RESTful set of web services that interact with the Microsoft Azure Networks service to programmatically view,
+// control, change, and monitor your entire Azure network centrally and with ease.
+type PeeringPoliciesClient struct {
+ BaseClient
+}
+
+// NewPeeringPoliciesClient creates an instance of the PeeringPoliciesClient client.
+func NewPeeringPoliciesClient(subscriptionID string) PeeringPoliciesClient {
+ return NewPeeringPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPeeringPoliciesClientWithBaseURI creates an instance of the PeeringPoliciesClient client.
+func NewPeeringPoliciesClientWithBaseURI(baseURI string, subscriptionID string) PeeringPoliciesClient {
+ return PeeringPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put ManagedNetworkPeeringPolicies operation creates/updates a new Managed Network Peering Policy
+// Parameters:
+// managedNetworkPolicy - parameters supplied to create/update a Managed Network Peering Policy
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+// managedNetworkPeeringPolicyName - the name of the Managed Network Peering Policy.
+func (client PeeringPoliciesClient) CreateOrUpdate(ctx context.Context, managedNetworkPolicy PeeringPolicy, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string) (result PeeringPoliciesCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringPoliciesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdatePreparer(ctx, managedNetworkPolicy, resourceGroupName, managedNetworkName, managedNetworkPeeringPolicyName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client PeeringPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, managedNetworkPolicy PeeringPolicy, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "managedNetworkPeeringPolicyName": autorest.Encode("path", managedNetworkPeeringPolicyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}", pathParameters),
+ autorest.WithJSON(managedNetworkPolicy),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeeringPoliciesClient) CreateOrUpdateSender(req *http.Request) (future PeeringPoliciesCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client PeeringPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result PeeringPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete the Delete ManagedNetworkPeeringPolicies operation deletes a Managed Network Peering Policy, specified by the
+// resource group, Managed Network name, and peering policy name
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+// managedNetworkPeeringPolicyName - the name of the Managed Network Peering Policy.
+func (client PeeringPoliciesClient) Delete(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string) (result PeeringPoliciesDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringPoliciesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, managedNetworkName, managedNetworkPeeringPolicyName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PeeringPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "managedNetworkPeeringPolicyName": autorest.Encode("path", managedNetworkPeeringPolicyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeeringPoliciesClient) DeleteSender(req *http.Request) (future PeeringPoliciesDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PeeringPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get the Get ManagedNetworkPeeringPolicies operation gets a Managed Network Peering Policy resource, specified by the
+// resource group, Managed Network name, and peering policy name
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+// managedNetworkPeeringPolicyName - the name of the Managed Network Peering Policy.
+func (client PeeringPoliciesClient) Get(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string) (result PeeringPolicy, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringPoliciesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, managedNetworkName, managedNetworkPeeringPolicyName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client PeeringPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "managedNetworkPeeringPolicyName": autorest.Encode("path", managedNetworkPeeringPolicyName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeeringPoliciesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PeeringPoliciesClient) GetResponder(resp *http.Response) (result PeeringPolicy, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByManagedNetwork the ListByManagedNetwork PeeringPolicies operation retrieves all the Managed Network Peering
+// Policies in a specified Managed Network, in a paginated format.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// managedNetworkName - the name of the Managed Network.
+// top - may be used to limit the number of results in a page for list queries.
+// skiptoken - skiptoken is only used if a previous operation returned a partial result. If a previous response
+// contains a nextLink element, the value of the nextLink element will include a skiptoken parameter that
+// specifies a starting point to use for subsequent calls.
+func (client PeeringPoliciesClient) ListByManagedNetwork(ctx context.Context, resourceGroupName string, managedNetworkName string, top *int32, skiptoken string) (result PeeringPolicyListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringPoliciesClient.ListByManagedNetwork")
+ defer func() {
+ sc := -1
+ if result.pplr.Response.Response != nil {
+ sc = result.pplr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: top,
+ Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMaximum, Rule: int64(20), Chain: nil},
+ {Target: "top", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("managednetwork.PeeringPoliciesClient", "ListByManagedNetwork", err.Error())
+ }
+
+ result.fn = client.listByManagedNetworkNextResults
+ req, err := client.ListByManagedNetworkPreparer(ctx, resourceGroupName, managedNetworkName, top, skiptoken)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "ListByManagedNetwork", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByManagedNetworkSender(req)
+ if err != nil {
+ result.pplr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "ListByManagedNetwork", resp, "Failure sending request")
+ return
+ }
+
+ result.pplr, err = client.ListByManagedNetworkResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "ListByManagedNetwork", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByManagedNetworkPreparer prepares the ListByManagedNetwork request.
+func (client PeeringPoliciesClient) ListByManagedNetworkPreparer(ctx context.Context, resourceGroupName string, managedNetworkName string, top *int32, skiptoken string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedNetworkName": autorest.Encode("path", managedNetworkName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+ if len(skiptoken) > 0 {
+ queryParameters["$skiptoken"] = autorest.Encode("query", skiptoken)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByManagedNetworkSender sends the ListByManagedNetwork request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeeringPoliciesClient) ListByManagedNetworkSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByManagedNetworkResponder handles the response to the ListByManagedNetwork request. The method always
+// closes the http.Response Body.
+func (client PeeringPoliciesClient) ListByManagedNetworkResponder(resp *http.Response) (result PeeringPolicyListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByManagedNetworkNextResults retrieves the next set of results, if any.
+func (client PeeringPoliciesClient) listByManagedNetworkNextResults(ctx context.Context, lastResults PeeringPolicyListResult) (result PeeringPolicyListResult, err error) {
+ req, err := lastResults.peeringPolicyListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "listByManagedNetworkNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByManagedNetworkSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "listByManagedNetworkNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByManagedNetworkResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.PeeringPoliciesClient", "listByManagedNetworkNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByManagedNetworkComplete enumerates all values, automatically crossing page boundaries as required.
+func (client PeeringPoliciesClient) ListByManagedNetworkComplete(ctx context.Context, resourceGroupName string, managedNetworkName string, top *int32, skiptoken string) (result PeeringPolicyListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringPoliciesClient.ListByManagedNetwork")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByManagedNetwork(ctx, resourceGroupName, managedNetworkName, top, skiptoken)
+ return
+}
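PeeringPoliciesClient mixes paging (ListByManagedNetwork, with top validated client-side to 1–20) with long-running operations that return futures (CreateOrUpdate, Delete). The sketch below lists the policies in a managed network and then deletes one, blocking on the future; resource group, network, and policy names are placeholders and the authorizer is supplied by the caller, as in the earlier example.

```go
package examples

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/to"
)

func managePeeringPolicies(authorizer autorest.Authorizer) {
	ctx := context.Background()
	client := managednetwork.NewPeeringPoliciesClient("<subscription-id>")
	client.Authorizer = authorizer

	// The page size is validated to 1..20 before any request is sent.
	it, err := client.ListByManagedNetworkComplete(ctx, "my-rg", "my-managed-network", to.Int32Ptr(20), "")
	if err != nil {
		log.Fatal(err)
	}
	for it.NotDone() {
		fmt.Printf("%+v\n", it.Value())
		if err := it.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}

	// Delete returns a future; block until the service reports completion.
	future, err := client.Delete(ctx, "my-rg", "my-managed-network", "my-peering-policy")
	if err != nil {
		log.Fatal(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
}
```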
diff --git a/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/scopeassignments.go b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/scopeassignments.go
new file mode 100644
index 000000000000..0c4741c9de5b
--- /dev/null
+++ b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/scopeassignments.go
@@ -0,0 +1,389 @@
+package managednetwork
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ScopeAssignmentsClient is the client for the Microsoft Azure Managed Network management API, which provides a
+// RESTful set of web services that interact with the Microsoft Azure Networks service to programmatically view,
+// control, change, and monitor your entire Azure network centrally and with ease.
+type ScopeAssignmentsClient struct {
+ BaseClient
+}
+
+// NewScopeAssignmentsClient creates an instance of the ScopeAssignmentsClient client.
+func NewScopeAssignmentsClient(subscriptionID string) ScopeAssignmentsClient {
+ return NewScopeAssignmentsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewScopeAssignmentsClientWithBaseURI creates an instance of the ScopeAssignmentsClient client.
+func NewScopeAssignmentsClientWithBaseURI(baseURI string, subscriptionID string) ScopeAssignmentsClient {
+ return ScopeAssignmentsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a scope assignment.
+// Parameters:
+// parameters - parameters supplied to specify which Managed Network this scope is being assigned to.
+// scope - the base resource of the scope assignment to create. The scope can be any REST resource instance.
+// For example, use '/subscriptions/{subscription-id}/' for a subscription,
+// '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for a resource group, and
+// '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider}/{resource-type}/{resource-name}'
+// for a resource.
+// scopeAssignmentName - the name of the scope assignment to create.
+func (client ScopeAssignmentsClient) CreateOrUpdate(ctx context.Context, parameters ScopeAssignment, scope string, scopeAssignmentName string) (result ScopeAssignment, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScopeAssignmentsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdatePreparer(ctx, parameters, scope, scopeAssignmentName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ScopeAssignmentsClient) CreateOrUpdatePreparer(ctx context.Context, parameters ScopeAssignment, scope string, scopeAssignmentName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "scope": scope,
+ "scopeAssignmentName": autorest.Encode("path", scopeAssignmentName),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/{scope}/providers/Microsoft.ManagedNetwork/scopeAssignments/{scopeAssignmentName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ScopeAssignmentsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ScopeAssignmentsClient) CreateOrUpdateResponder(resp *http.Response) (result ScopeAssignment, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes a scope assignment.
+// Parameters:
+// scope - the scope of the scope assignment to delete.
+// scopeAssignmentName - the name of the scope assignment to delete.
+func (client ScopeAssignmentsClient) Delete(ctx context.Context, scope string, scopeAssignmentName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScopeAssignmentsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, scope, scopeAssignmentName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ScopeAssignmentsClient) DeletePreparer(ctx context.Context, scope string, scopeAssignmentName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "scope": scope,
+ "scopeAssignmentName": autorest.Encode("path", scopeAssignmentName),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/{scope}/providers/Microsoft.ManagedNetwork/scopeAssignments/{scopeAssignmentName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ScopeAssignmentsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ScopeAssignmentsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get get the specified scope assignment.
+// Parameters:
+// scope - the base resource of the scope assignment.
+// scopeAssignmentName - the name of the scope assignment to get.
+func (client ScopeAssignmentsClient) Get(ctx context.Context, scope string, scopeAssignmentName string) (result ScopeAssignment, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScopeAssignmentsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, scope, scopeAssignmentName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ScopeAssignmentsClient) GetPreparer(ctx context.Context, scope string, scopeAssignmentName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "scope": scope,
+ "scopeAssignmentName": autorest.Encode("path", scopeAssignmentName),
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/{scope}/providers/Microsoft.ManagedNetwork/scopeAssignments/{scopeAssignmentName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ScopeAssignmentsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ScopeAssignmentsClient) GetResponder(resp *http.Response) (result ScopeAssignment, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all scope assignments within the specified scope.
+// Parameters:
+// scope - the base resource of the scope assignment.
+func (client ScopeAssignmentsClient) List(ctx context.Context, scope string) (result ScopeAssignmentListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScopeAssignmentsClient.List")
+ defer func() {
+ sc := -1
+ if result.salr.Response.Response != nil {
+ sc = result.salr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, scope)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.salr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.salr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ScopeAssignmentsClient) ListPreparer(ctx context.Context, scope string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "scope": scope,
+ }
+
+ const APIVersion = "2019-06-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/{scope}/providers/Microsoft.ManagedNetwork/scopeAssignments", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ScopeAssignmentsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ScopeAssignmentsClient) ListResponder(resp *http.Response) (result ScopeAssignmentListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client ScopeAssignmentsClient) listNextResults(ctx context.Context, lastResults ScopeAssignmentListResult) (result ScopeAssignmentListResult, err error) {
+ req, err := lastResults.scopeAssignmentListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "managednetwork.ScopeAssignmentsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ScopeAssignmentsClient) ListComplete(ctx context.Context, scope string) (result ScopeAssignmentListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScopeAssignmentsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, scope)
+ return
+}
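ScopeAssignmentsClient is scope-based rather than resource-group-based: the scope string is spliced directly into the /{scope}/providers/... path, using the forms described in the CreateOrUpdate doc comment. A hedged sketch, with a placeholder subscription scope and a caller-supplied authorizer:

```go
package examples

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork"
	"github.com/Azure/go-autorest/autorest"
)

func listScopeAssignments(authorizer autorest.Authorizer) {
	ctx := context.Background()
	client := managednetwork.NewScopeAssignmentsClient("<subscription-id>")
	client.Authorizer = authorizer

	// The scope can be a subscription, resource group, or resource ID, per the
	// CreateOrUpdate doc comment; it is inserted verbatim into the URL path.
	scope := "subscriptions/<subscription-id>"

	it, err := client.ListComplete(ctx, scope)
	if err != nil {
		log.Fatal(err)
	}
	for it.NotDone() {
		fmt.Printf("%+v\n", it.Value())
		if err := it.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```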
diff --git a/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/version.go b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/version.go
new file mode 100644
index 000000000000..4924dee61437
--- /dev/null
+++ b/services/preview/managednetwork/mgmt/2019-06-01-preview/managednetwork/version.go
@@ -0,0 +1,30 @@
+package managednetwork
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " managednetwork/2019-06-01-preview"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/client.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/client.go
new file mode 100644
index 000000000000..b221a50435f4
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/client.go
@@ -0,0 +1,133 @@
+// Package peering implements the Azure ARM Peering service API version 2019-08-01-preview.
+//
+// Peering Client
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Peering
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Peering.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
+
+// CheckServiceProviderAvailability checks if the peering service provider is present within 1000 miles of the
+// customer's location.
+// Parameters:
+// checkServiceProviderAvailabilityInput - the CheckServiceProviderAvailabilityInput indicating customer
+// location and service provider.
+func (client BaseClient) CheckServiceProviderAvailability(ctx context.Context, checkServiceProviderAvailabilityInput CheckServiceProviderAvailabilityInput) (result String, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CheckServiceProviderAvailability")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CheckServiceProviderAvailabilityPreparer(ctx, checkServiceProviderAvailabilityInput)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.BaseClient", "CheckServiceProviderAvailability", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CheckServiceProviderAvailabilitySender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.BaseClient", "CheckServiceProviderAvailability", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CheckServiceProviderAvailabilityResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.BaseClient", "CheckServiceProviderAvailability", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CheckServiceProviderAvailabilityPreparer prepares the CheckServiceProviderAvailability request.
+func (client BaseClient) CheckServiceProviderAvailabilityPreparer(ctx context.Context, checkServiceProviderAvailabilityInput CheckServiceProviderAvailabilityInput) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/CheckServiceProviderAvailability", pathParameters),
+ autorest.WithJSON(checkServiceProviderAvailabilityInput),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CheckServiceProviderAvailabilitySender sends the CheckServiceProviderAvailability request. The method will close the
+// http.Response Body if it receives an error.
+func (client BaseClient) CheckServiceProviderAvailabilitySender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CheckServiceProviderAvailabilityResponder handles the response to the CheckServiceProviderAvailability request. The method always
+// closes the http.Response Body.
+func (client BaseClient) CheckServiceProviderAvailabilityResponder(resp *http.Response) (result String, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result.Value),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
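CheckServiceProviderAvailability is a subscription-level POST whose response is unmarshalled into the String wrapper's Value field (assumed here to be a *string, matching the responder above). The input's fields are defined in models.go and left empty in this sketch; the subscription ID is a placeholder and the authorizer is supplied by the caller.

```go
package examples

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
	"github.com/Azure/go-autorest/autorest"
)

func checkProvider(authorizer autorest.Authorizer) {
	ctx := context.Background()
	client := peering.New("<subscription-id>")
	client.Authorizer = authorizer

	// The input's fields (customer location, service provider name) live in
	// models.go and are omitted here.
	input := peering.CheckServiceProviderAvailabilityInput{}

	res, err := client.CheckServiceProviderAvailability(ctx, input)
	if err != nil {
		log.Fatal(err)
	}
	if res.Value != nil {
		fmt.Println(*res.Value)
	}
}
```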
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/legacypeerings.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/legacypeerings.go
new file mode 100644
index 000000000000..bbb540643822
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/legacypeerings.go
@@ -0,0 +1,156 @@
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// LegacyPeeringsClient is the peering Client
+type LegacyPeeringsClient struct {
+ BaseClient
+}
+
+// NewLegacyPeeringsClient creates an instance of the LegacyPeeringsClient client.
+func NewLegacyPeeringsClient(subscriptionID string) LegacyPeeringsClient {
+ return NewLegacyPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewLegacyPeeringsClientWithBaseURI creates an instance of the LegacyPeeringsClient client.
+func NewLegacyPeeringsClientWithBaseURI(baseURI string, subscriptionID string) LegacyPeeringsClient {
+ return LegacyPeeringsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists all of the legacy peerings under the given subscription matching the specified kind and location.
+// Parameters:
+// peeringLocation - the location of the peering.
+// kind - the kind of the peering.
+func (client LegacyPeeringsClient) List(ctx context.Context, peeringLocation string, kind string) (result ListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LegacyPeeringsClient.List")
+ defer func() {
+ sc := -1
+ if result.lr.Response.Response != nil {
+ sc = result.lr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, peeringLocation, kind)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.LegacyPeeringsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.lr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.LegacyPeeringsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.lr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.LegacyPeeringsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client LegacyPeeringsClient) ListPreparer(ctx context.Context, peeringLocation string, kind string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ "kind": autorest.Encode("query", kind),
+ "peeringLocation": autorest.Encode("query", peeringLocation),
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/legacyPeerings", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client LegacyPeeringsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client LegacyPeeringsClient) ListResponder(resp *http.Response) (result ListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client LegacyPeeringsClient) listNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) {
+ req, err := lastResults.listResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "peering.LegacyPeeringsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "peering.LegacyPeeringsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.LegacyPeeringsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client LegacyPeeringsClient) ListComplete(ctx context.Context, peeringLocation string, kind string) (result ListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LegacyPeeringsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, peeringLocation, kind)
+ return
+}
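LegacyPeeringsClient.List filters by peeringLocation and kind, both sent as query parameters. A minimal sketch follows; the location is a placeholder, and the Kind constant from models.go is converted to the plain string the signature expects.

```go
package examples

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
	"github.com/Azure/go-autorest/autorest"
)

func listLegacyPeerings(authorizer autorest.Authorizer) {
	ctx := context.Background()
	client := peering.NewLegacyPeeringsClient("<subscription-id>")
	client.Authorizer = authorizer

	// kind and peeringLocation become query parameters on the request.
	it, err := client.ListComplete(ctx, "<peering-location>", string(peering.KindDirect))
	if err != nil {
		log.Fatal(err)
	}
	for it.NotDone() {
		fmt.Printf("%+v\n", it.Value())
		if err := it.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```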
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/locations.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/locations.go
new file mode 100644
index 000000000000..c77b5576f293
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/locations.go
@@ -0,0 +1,158 @@
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// LocationsClient is the peering Client
+type LocationsClient struct {
+ BaseClient
+}
+
+// NewLocationsClient creates an instance of the LocationsClient client.
+func NewLocationsClient(subscriptionID string) LocationsClient {
+ return NewLocationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewLocationsClientWithBaseURI creates an instance of the LocationsClient client.
+func NewLocationsClientWithBaseURI(baseURI string, subscriptionID string) LocationsClient {
+ return LocationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists all of the available peering locations for the specified kind of peering.
+// Parameters:
+// kind - the kind of the peering.
+// directPeeringType - the type of direct peering.
+func (client LocationsClient) List(ctx context.Context, kind string, directPeeringType string) (result LocationListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocationsClient.List")
+ defer func() {
+ sc := -1
+ if result.llr.Response.Response != nil {
+ sc = result.llr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, kind, directPeeringType)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.LocationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.llr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.LocationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.llr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.LocationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client LocationsClient) ListPreparer(ctx context.Context, kind string, directPeeringType string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ "kind": autorest.Encode("query", kind),
+ }
+ if len(string(directPeeringType)) > 0 {
+ queryParameters["directPeeringType"] = autorest.Encode("query", directPeeringType)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/peeringLocations", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client LocationsClient) ListResponder(resp *http.Response) (result LocationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client LocationsClient) listNextResults(ctx context.Context, lastResults LocationListResult) (result LocationListResult, err error) {
+ req, err := lastResults.locationListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "peering.LocationsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "peering.LocationsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.LocationsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client LocationsClient) ListComplete(ctx context.Context, kind string, directPeeringType string) (result LocationListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, kind, directPeeringType)
+ return
+}
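LocationsClient.List takes the peering kind plus an optional directPeeringType that is only appended to the query string when non-empty. A sketch under the same assumptions (placeholder subscription, caller-supplied authorizer), using the enum constants defined in models.go:

```go
package examples

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
	"github.com/Azure/go-autorest/autorest"
)

func listPeeringLocations(authorizer autorest.Authorizer) {
	ctx := context.Background()
	client := peering.NewLocationsClient("<subscription-id>")
	client.Authorizer = authorizer

	// directPeeringType is optional; pass "" to list locations for every
	// direct peering type instead of filtering to Edge.
	it, err := client.ListComplete(ctx, string(peering.KindDirect), string(peering.Edge))
	if err != nil {
		log.Fatal(err)
	}
	for it.NotDone() {
		fmt.Printf("%+v\n", it.Value())
		if err := it.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```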
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/models.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/models.go
new file mode 100644
index 000000000000..676d8b5b540a
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/models.go
@@ -0,0 +1,2399 @@
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
+
+// ConnectionState enumerates the values for connection state.
+type ConnectionState string
+
+const (
+ // Active ...
+ Active ConnectionState = "Active"
+ // Approved ...
+ Approved ConnectionState = "Approved"
+ // None ...
+ None ConnectionState = "None"
+ // PendingApproval ...
+ PendingApproval ConnectionState = "PendingApproval"
+ // ProvisioningCompleted ...
+ ProvisioningCompleted ConnectionState = "ProvisioningCompleted"
+ // ProvisioningFailed ...
+ ProvisioningFailed ConnectionState = "ProvisioningFailed"
+ // ProvisioningStarted ...
+ ProvisioningStarted ConnectionState = "ProvisioningStarted"
+ // Validating ...
+ Validating ConnectionState = "Validating"
+)
+
+// PossibleConnectionStateValues returns an array of possible values for the ConnectionState const type.
+func PossibleConnectionStateValues() []ConnectionState {
+ return []ConnectionState{Active, Approved, None, PendingApproval, ProvisioningCompleted, ProvisioningFailed, ProvisioningStarted, Validating}
+}
+
+// DirectPeeringType enumerates the values for direct peering type.
+type DirectPeeringType string
+
+const (
+ // Cdn ...
+ Cdn DirectPeeringType = "Cdn"
+ // Edge ...
+ Edge DirectPeeringType = "Edge"
+ // Internal ...
+ Internal DirectPeeringType = "Internal"
+ // Transit ...
+ Transit DirectPeeringType = "Transit"
+)
+
+// PossibleDirectPeeringTypeValues returns an array of possible values for the DirectPeeringType const type.
+func PossibleDirectPeeringTypeValues() []DirectPeeringType {
+ return []DirectPeeringType{Cdn, Edge, Internal, Transit}
+}
+
+// Family enumerates the values for family.
+type Family string
+
+const (
+ // Direct ...
+ Direct Family = "Direct"
+ // Exchange ...
+ Exchange Family = "Exchange"
+)
+
+// PossibleFamilyValues returns an array of possible values for the Family const type.
+func PossibleFamilyValues() []Family {
+ return []Family{Direct, Exchange}
+}
+
+// Kind enumerates the values for kind.
+type Kind string
+
+const (
+ // KindDirect ...
+ KindDirect Kind = "Direct"
+ // KindExchange ...
+ KindExchange Kind = "Exchange"
+)
+
+// PossibleKindValues returns an array of possible values for the Kind const type.
+func PossibleKindValues() []Kind {
+ return []Kind{KindDirect, KindExchange}
+}
+
+// LearnedType enumerates the values for learned type.
+type LearnedType string
+
+const (
+ // LearnedTypeNone ...
+ LearnedTypeNone LearnedType = "None"
+ // LearnedTypeViaPartner ...
+ LearnedTypeViaPartner LearnedType = "ViaPartner"
+ // LearnedTypeViaSession ...
+ LearnedTypeViaSession LearnedType = "ViaSession"
+)
+
+// PossibleLearnedTypeValues returns an array of possible values for the LearnedType const type.
+func PossibleLearnedTypeValues() []LearnedType {
+ return []LearnedType{LearnedTypeNone, LearnedTypeViaPartner, LearnedTypeViaSession}
+}
+
+// Name enumerates the values for name.
+type Name string
+
+const (
+ // BasicDirectFree ...
+ BasicDirectFree Name = "Basic_Direct_Free"
+ // BasicExchangeFree ...
+ BasicExchangeFree Name = "Basic_Exchange_Free"
+ // PremiumDirectFree ...
+ PremiumDirectFree Name = "Premium_Direct_Free"
+ // PremiumDirectMetered ...
+ PremiumDirectMetered Name = "Premium_Direct_Metered"
+ // PremiumDirectUnlimited ...
+ PremiumDirectUnlimited Name = "Premium_Direct_Unlimited"
+ // PremiumExchangeMetered ...
+ PremiumExchangeMetered Name = "Premium_Exchange_Metered"
+)
+
+// PossibleNameValues returns an array of possible values for the Name const type.
+func PossibleNameValues() []Name {
+ return []Name{BasicDirectFree, BasicExchangeFree, PremiumDirectFree, PremiumDirectMetered, PremiumDirectUnlimited, PremiumExchangeMetered}
+}
+
+// PrefixValidationState enumerates the values for prefix validation state.
+type PrefixValidationState string
+
+const (
+ // PrefixValidationStateFailed ...
+ PrefixValidationStateFailed PrefixValidationState = "Failed"
+ // PrefixValidationStateInvalid ...
+ PrefixValidationStateInvalid PrefixValidationState = "Invalid"
+ // PrefixValidationStateNone ...
+ PrefixValidationStateNone PrefixValidationState = "None"
+ // PrefixValidationStatePending ...
+ PrefixValidationStatePending PrefixValidationState = "Pending"
+ // PrefixValidationStateUnknown ...
+ PrefixValidationStateUnknown PrefixValidationState = "Unknown"
+ // PrefixValidationStateVerified ...
+ PrefixValidationStateVerified PrefixValidationState = "Verified"
+)
+
+// PossiblePrefixValidationStateValues returns an array of possible values for the PrefixValidationState const type.
+func PossiblePrefixValidationStateValues() []PrefixValidationState {
+ return []PrefixValidationState{PrefixValidationStateFailed, PrefixValidationStateInvalid, PrefixValidationStateNone, PrefixValidationStatePending, PrefixValidationStateUnknown, PrefixValidationStateVerified}
+}
+
+// ProvisioningState enumerates the values for provisioning state.
+type ProvisioningState string
+
+const (
+ // Deleting ...
+ Deleting ProvisioningState = "Deleting"
+ // Failed ...
+ Failed ProvisioningState = "Failed"
+ // Succeeded ...
+ Succeeded ProvisioningState = "Succeeded"
+ // Updating ...
+ Updating ProvisioningState = "Updating"
+)
+
+// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type.
+func PossibleProvisioningStateValues() []ProvisioningState {
+ return []ProvisioningState{Deleting, Failed, Succeeded, Updating}
+}
+
+// SessionAddressProvider enumerates the values for session address provider.
+type SessionAddressProvider string
+
+const (
+ // Microsoft ...
+ Microsoft SessionAddressProvider = "Microsoft"
+ // Peer ...
+ Peer SessionAddressProvider = "Peer"
+)
+
+// PossibleSessionAddressProviderValues returns an array of possible values for the SessionAddressProvider const type.
+func PossibleSessionAddressProviderValues() []SessionAddressProvider {
+ return []SessionAddressProvider{Microsoft, Peer}
+}
+
+// SessionStateV4 enumerates the values for session state v4.
+type SessionStateV4 string
+
+const (
+ // SessionStateV4Active ...
+ SessionStateV4Active SessionStateV4 = "Active"
+ // SessionStateV4Connect ...
+ SessionStateV4Connect SessionStateV4 = "Connect"
+ // SessionStateV4Established ...
+ SessionStateV4Established SessionStateV4 = "Established"
+ // SessionStateV4Idle ...
+ SessionStateV4Idle SessionStateV4 = "Idle"
+ // SessionStateV4None ...
+ SessionStateV4None SessionStateV4 = "None"
+ // SessionStateV4OpenConfirm ...
+ SessionStateV4OpenConfirm SessionStateV4 = "OpenConfirm"
+ // SessionStateV4OpenReceived ...
+ SessionStateV4OpenReceived SessionStateV4 = "OpenReceived"
+ // SessionStateV4OpenSent ...
+ SessionStateV4OpenSent SessionStateV4 = "OpenSent"
+ // SessionStateV4PendingAdd ...
+ SessionStateV4PendingAdd SessionStateV4 = "PendingAdd"
+ // SessionStateV4PendingRemove ...
+ SessionStateV4PendingRemove SessionStateV4 = "PendingRemove"
+ // SessionStateV4PendingUpdate ...
+ SessionStateV4PendingUpdate SessionStateV4 = "PendingUpdate"
+)
+
+// PossibleSessionStateV4Values returns an array of possible values for the SessionStateV4 const type.
+func PossibleSessionStateV4Values() []SessionStateV4 {
+ return []SessionStateV4{SessionStateV4Active, SessionStateV4Connect, SessionStateV4Established, SessionStateV4Idle, SessionStateV4None, SessionStateV4OpenConfirm, SessionStateV4OpenReceived, SessionStateV4OpenSent, SessionStateV4PendingAdd, SessionStateV4PendingRemove, SessionStateV4PendingUpdate}
+}
+
+// SessionStateV6 enumerates the values for session state v6.
+type SessionStateV6 string
+
+const (
+ // SessionStateV6Active ...
+ SessionStateV6Active SessionStateV6 = "Active"
+ // SessionStateV6Connect ...
+ SessionStateV6Connect SessionStateV6 = "Connect"
+ // SessionStateV6Established ...
+ SessionStateV6Established SessionStateV6 = "Established"
+ // SessionStateV6Idle ...
+ SessionStateV6Idle SessionStateV6 = "Idle"
+ // SessionStateV6None ...
+ SessionStateV6None SessionStateV6 = "None"
+ // SessionStateV6OpenConfirm ...
+ SessionStateV6OpenConfirm SessionStateV6 = "OpenConfirm"
+ // SessionStateV6OpenReceived ...
+ SessionStateV6OpenReceived SessionStateV6 = "OpenReceived"
+ // SessionStateV6OpenSent ...
+ SessionStateV6OpenSent SessionStateV6 = "OpenSent"
+ // SessionStateV6PendingAdd ...
+ SessionStateV6PendingAdd SessionStateV6 = "PendingAdd"
+ // SessionStateV6PendingRemove ...
+ SessionStateV6PendingRemove SessionStateV6 = "PendingRemove"
+ // SessionStateV6PendingUpdate ...
+ SessionStateV6PendingUpdate SessionStateV6 = "PendingUpdate"
+)
+
+// PossibleSessionStateV6Values returns an array of possible values for the SessionStateV6 const type.
+func PossibleSessionStateV6Values() []SessionStateV6 {
+ return []SessionStateV6{SessionStateV6Active, SessionStateV6Connect, SessionStateV6Established, SessionStateV6Idle, SessionStateV6None, SessionStateV6OpenConfirm, SessionStateV6OpenReceived, SessionStateV6OpenSent, SessionStateV6PendingAdd, SessionStateV6PendingRemove, SessionStateV6PendingUpdate}
+}
+
+// Size enumerates the values for size.
+type Size string
+
+const (
+ // Free ...
+ Free Size = "Free"
+ // Metered ...
+ Metered Size = "Metered"
+ // Unlimited ...
+ Unlimited Size = "Unlimited"
+)
+
+// PossibleSizeValues returns an array of possible values for the Size const type.
+func PossibleSizeValues() []Size {
+ return []Size{Free, Metered, Unlimited}
+}
+
+// Tier enumerates the values for tier.
+type Tier string
+
+const (
+ // Basic ...
+ Basic Tier = "Basic"
+ // Premium ...
+ Premium Tier = "Premium"
+)
+
+// PossibleTierValues returns an array of possible values for the Tier const type.
+func PossibleTierValues() []Tier {
+ return []Tier{Basic, Premium}
+}
+
+// ValidationState enumerates the values for validation state.
+type ValidationState string
+
+const (
+ // ValidationStateApproved ...
+ ValidationStateApproved ValidationState = "Approved"
+ // ValidationStateFailed ...
+ ValidationStateFailed ValidationState = "Failed"
+ // ValidationStateNone ...
+ ValidationStateNone ValidationState = "None"
+ // ValidationStatePending ...
+ ValidationStatePending ValidationState = "Pending"
+)
+
+// PossibleValidationStateValues returns an array of possible values for the ValidationState const type.
+func PossibleValidationStateValues() []ValidationState {
+ return []ValidationState{ValidationStateApproved, ValidationStateFailed, ValidationStateNone, ValidationStatePending}
+}
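+
+// validateKind is a hypothetical helper, not part of the generated API,
+// sketching how the Possible*Values functions can be used to validate
+// free-form input before it is sent to the service.
+func validateKind(raw string) (Kind, bool) {
+	for _, k := range PossibleKindValues() {
+		if string(k) == raw {
+			return k, true
+		}
+	}
+	return "", false
+}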
+
+// BandwidthOffer the properties that define a peering bandwidth offer.
+type BandwidthOffer struct {
+ // OfferName - The name of the bandwidth offer.
+ OfferName *string `json:"offerName,omitempty"`
+ // ValueInMbps - The value of the bandwidth offer in Mbps.
+ ValueInMbps *int32 `json:"valueInMbps,omitempty"`
+}
+
+// BgpSession the properties that define a BGP session.
+type BgpSession struct {
+ // SessionPrefixV4 - The IPv4 prefix that contains both ends' IPv4 addresses.
+ SessionPrefixV4 *string `json:"sessionPrefixV4,omitempty"`
+ // SessionPrefixV6 - The IPv6 prefix that contains both ends' IPv6 addresses.
+ SessionPrefixV6 *string `json:"sessionPrefixV6,omitempty"`
+ // MicrosoftSessionIPv4Address - READ-ONLY; The IPv4 session address on Microsoft's end.
+ MicrosoftSessionIPv4Address *string `json:"microsoftSessionIPv4Address,omitempty"`
+ // MicrosoftSessionIPv6Address - READ-ONLY; The IPv6 session address on Microsoft's end.
+ MicrosoftSessionIPv6Address *string `json:"microsoftSessionIPv6Address,omitempty"`
+ // PeerSessionIPv4Address - The IPv4 session address on peer's end.
+ PeerSessionIPv4Address *string `json:"peerSessionIPv4Address,omitempty"`
+ // PeerSessionIPv6Address - The IPv6 session address on peer's end.
+ PeerSessionIPv6Address *string `json:"peerSessionIPv6Address,omitempty"`
+ // SessionStateV4 - READ-ONLY; The state of the IPv4 session. Possible values include: 'SessionStateV4None', 'SessionStateV4Idle', 'SessionStateV4Connect', 'SessionStateV4Active', 'SessionStateV4OpenSent', 'SessionStateV4OpenConfirm', 'SessionStateV4OpenReceived', 'SessionStateV4Established', 'SessionStateV4PendingAdd', 'SessionStateV4PendingUpdate', 'SessionStateV4PendingRemove'
+ SessionStateV4 SessionStateV4 `json:"sessionStateV4,omitempty"`
+ // SessionStateV6 - READ-ONLY; The state of the IPv6 session. Possible values include: 'SessionStateV6None', 'SessionStateV6Idle', 'SessionStateV6Connect', 'SessionStateV6Active', 'SessionStateV6OpenSent', 'SessionStateV6OpenConfirm', 'SessionStateV6OpenReceived', 'SessionStateV6Established', 'SessionStateV6PendingAdd', 'SessionStateV6PendingUpdate', 'SessionStateV6PendingRemove'
+ SessionStateV6 SessionStateV6 `json:"sessionStateV6,omitempty"`
+ // MaxPrefixesAdvertisedV4 - The maximum number of prefixes advertised over the IPv4 session.
+ MaxPrefixesAdvertisedV4 *int32 `json:"maxPrefixesAdvertisedV4,omitempty"`
+ // MaxPrefixesAdvertisedV6 - The maximum number of prefixes advertised over the IPv6 session.
+ MaxPrefixesAdvertisedV6 *int32 `json:"maxPrefixesAdvertisedV6,omitempty"`
+ // Md5AuthenticationKey - The MD5 authentication key of the session.
+ Md5AuthenticationKey *string `json:"md5AuthenticationKey,omitempty"`
+}
+
+// CheckServiceProviderAvailabilityInput the input used to check the availability of a peering service provider at a peering service location.
+type CheckServiceProviderAvailabilityInput struct {
+ // PeeringServiceLocation - Gets or sets the PeeringServiceLocation
+ PeeringServiceLocation *string `json:"peeringServiceLocation,omitempty"`
+ // PeeringServiceProvider - Gets or sets the PeeringServiceProvider
+ PeeringServiceProvider *string `json:"peeringServiceProvider,omitempty"`
+}
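+
+// exampleAvailabilityInput is a hypothetical sketch, not part of the generated
+// API, showing how the availability check input might be populated; the
+// location and provider names below are placeholder values.
+func exampleAvailabilityInput() CheckServiceProviderAvailabilityInput {
+	return CheckServiceProviderAvailabilityInput{
+		PeeringServiceLocation: to.StringPtr("Osaka"),
+		PeeringServiceProvider: to.StringPtr("Contoso"),
+	}
+}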
+
+// ContactInfo the contact information of the peer.
+type ContactInfo struct {
+ // Emails - The list of email addresses.
+ Emails *[]string `json:"emails,omitempty"`
+ // Phone - The list of contact numbers.
+ Phone *[]string `json:"phone,omitempty"`
+}
+
+// DirectConnection the properties that define a direct connection.
+type DirectConnection struct {
+ // BandwidthInMbps - The bandwidth of the connection.
+ BandwidthInMbps *int32 `json:"bandwidthInMbps,omitempty"`
+ // ProvisionedBandwidthInMbps - The bandwidth that is actually provisioned.
+ ProvisionedBandwidthInMbps *int32 `json:"provisionedBandwidthInMbps,omitempty"`
+	// SessionAddressProvider - The field indicating if Microsoft provides session IP addresses. Possible values include: 'Microsoft', 'Peer'
+ SessionAddressProvider SessionAddressProvider `json:"sessionAddressProvider,omitempty"`
+ // UseForPeeringService - The flag that indicates whether or not the connection is used for peering service.
+ UseForPeeringService *bool `json:"useForPeeringService,omitempty"`
+ // PeeringDBFacilityID - The PeeringDB.com ID of the facility at which the connection has to be set up.
+ PeeringDBFacilityID *int32 `json:"peeringDBFacilityId,omitempty"`
+ // ConnectionState - READ-ONLY; The state of the connection. Possible values include: 'None', 'PendingApproval', 'Approved', 'ProvisioningStarted', 'ProvisioningFailed', 'ProvisioningCompleted', 'Validating', 'Active'
+ ConnectionState ConnectionState `json:"connectionState,omitempty"`
+ // BgpSession - The BGP session associated with the connection.
+ BgpSession *BgpSession `json:"bgpSession,omitempty"`
+ // ConnectionIdentifier - The unique identifier (GUID) for the connection.
+ ConnectionIdentifier *string `json:"connectionIdentifier,omitempty"`
+}
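+
+// newDirectConnectionSketch is a hypothetical helper, not part of the generated
+// API, illustrating how a DirectConnection and its BGP session might be built;
+// the bandwidth, facility ID and session prefix below are placeholder values.
+func newDirectConnectionSketch() DirectConnection {
+	return DirectConnection{
+		BandwidthInMbps:        to.Int32Ptr(10000),
+		SessionAddressProvider: Peer,
+		UseForPeeringService:   to.BoolPtr(false),
+		PeeringDBFacilityID:    to.Int32Ptr(99999),
+		BgpSession: &BgpSession{
+			SessionPrefixV4:         to.StringPtr("192.0.2.0/31"),
+			MaxPrefixesAdvertisedV4: to.Int32Ptr(20000),
+		},
+	}
+}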
+
+// DirectPeeringFacility the properties that define a direct peering facility.
+type DirectPeeringFacility struct {
+ // Address - The address of the direct peering facility.
+ Address *string `json:"address,omitempty"`
+ // DirectPeeringType - The type of the direct peering. Possible values include: 'Edge', 'Transit', 'Cdn', 'Internal'
+ DirectPeeringType DirectPeeringType `json:"directPeeringType,omitempty"`
+ // PeeringDBFacilityID - The PeeringDB.com ID of the facility.
+ PeeringDBFacilityID *int32 `json:"peeringDBFacilityId,omitempty"`
+ // PeeringDBFacilityLink - The PeeringDB.com URL of the facility.
+ PeeringDBFacilityLink *string `json:"peeringDBFacilityLink,omitempty"`
+}
+
+// ErrorResponse the error response that indicates why an operation has failed.
+type ErrorResponse struct {
+ // Code - READ-ONLY; The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; The error message.
+ Message *string `json:"message,omitempty"`
+}
+
+// ExchangeConnection the properties that define an exchange connection.
+type ExchangeConnection struct {
+ // PeeringDBFacilityID - The PeeringDB.com ID of the facility at which the connection has to be set up.
+ PeeringDBFacilityID *int32 `json:"peeringDBFacilityId,omitempty"`
+ // ConnectionState - READ-ONLY; The state of the connection. Possible values include: 'None', 'PendingApproval', 'Approved', 'ProvisioningStarted', 'ProvisioningFailed', 'ProvisioningCompleted', 'Validating', 'Active'
+ ConnectionState ConnectionState `json:"connectionState,omitempty"`
+ // BgpSession - The BGP session associated with the connection.
+ BgpSession *BgpSession `json:"bgpSession,omitempty"`
+ // ConnectionIdentifier - The unique identifier (GUID) for the connection.
+ ConnectionIdentifier *string `json:"connectionIdentifier,omitempty"`
+}
+
+// ExchangePeeringFacility the properties that define an exchange peering facility.
+type ExchangePeeringFacility struct {
+ // ExchangeName - The name of the exchange peering facility.
+ ExchangeName *string `json:"exchangeName,omitempty"`
+ // BandwidthInMbps - The bandwidth of the connection between Microsoft and the exchange peering facility.
+ BandwidthInMbps *int32 `json:"bandwidthInMbps,omitempty"`
+ // MicrosoftIPv4Address - The IPv4 address of Microsoft at the exchange peering facility.
+ MicrosoftIPv4Address *string `json:"microsoftIPv4Address,omitempty"`
+ // MicrosoftIPv6Address - The IPv6 address of Microsoft at the exchange peering facility.
+ MicrosoftIPv6Address *string `json:"microsoftIPv6Address,omitempty"`
+ // FacilityIPv4Prefix - The IPv4 prefixes associated with the exchange peering facility.
+ FacilityIPv4Prefix *string `json:"facilityIPv4Prefix,omitempty"`
+ // FacilityIPv6Prefix - The IPv6 prefixes associated with the exchange peering facility.
+ FacilityIPv6Prefix *string `json:"facilityIPv6Prefix,omitempty"`
+ // PeeringDBFacilityID - The PeeringDB.com ID of the facility.
+ PeeringDBFacilityID *int32 `json:"peeringDBFacilityId,omitempty"`
+ // PeeringDBFacilityLink - The PeeringDB.com URL of the facility.
+ PeeringDBFacilityLink *string `json:"peeringDBFacilityLink,omitempty"`
+}
+
+// ListResult the paginated list of peerings.
+type ListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of peerings.
+ Value *[]Model `json:"value,omitempty"`
+ // NextLink - The link to fetch the next page of peerings.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ListResultIterator provides access to a complete listing of Model values.
+type ListResultIterator struct {
+ i int
+ page ListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ListResultIterator) Response() ListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ListResultIterator) Value() Model {
+ if !iter.page.NotDone() {
+ return Model{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewListResultIterator creates a new instance of the ListResultIterator type.
+func NewListResultIterator(page ListResultPage) ListResultIterator {
+ return ListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (lr ListResult) IsEmpty() bool {
+ return lr.Value == nil || len(*lr.Value) == 0
+}
+
+// listResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (lr ListResult) listResultPreparer(ctx context.Context) (*http.Request, error) {
+ if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(lr.NextLink)))
+}
+
+// ListResultPage contains a page of Model values.
+type ListResultPage struct {
+ fn func(context.Context, ListResult) (ListResult, error)
+ lr ListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.lr)
+ if err != nil {
+ return err
+ }
+ page.lr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ListResultPage) NotDone() bool {
+ return !page.lr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ListResultPage) Response() ListResult {
+ return page.lr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ListResultPage) Values() []Model {
+ if page.lr.IsEmpty() {
+ return nil
+ }
+ return *page.lr.Value
+}
+
+// NewListResultPage creates a new instance of the ListResultPage type.
+func NewListResultPage(getNextPage func(context.Context, ListResult) (ListResult, error)) ListResultPage {
+ return ListResultPage{fn: getNextPage}
+}
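+
+// examplePageLoop is a hypothetical sketch, not part of the generated API,
+// showing manual page-by-page traversal with a ListResultPage (for example one
+// returned by a peerings list operation) instead of a ListComplete iterator.
+func examplePageLoop(ctx context.Context, page ListResultPage) error {
+	for page.NotDone() {
+		for _, m := range page.Values() {
+			_ = m // each m is a Model representing one peering
+		}
+		if err := page.NextWithContext(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}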
+
+// Location a peering location where connectivity can be established to the Microsoft Cloud Edge.
+type Location struct {
+ // Kind - The kind of peering that the peering location supports. Possible values include: 'KindDirect', 'KindExchange'
+ Kind Kind `json:"kind,omitempty"`
+ // LocationProperties - The properties that define a peering location.
+ *LocationProperties `json:"properties,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Location.
+func (l Location) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if l.Kind != "" {
+ objectMap["kind"] = l.Kind
+ }
+ if l.LocationProperties != nil {
+ objectMap["properties"] = l.LocationProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Location struct.
+func (l *Location) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "kind":
+ if v != nil {
+ var kind Kind
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ l.Kind = kind
+ }
+ case "properties":
+ if v != nil {
+ var locationProperties LocationProperties
+ err = json.Unmarshal(*v, &locationProperties)
+ if err != nil {
+ return err
+ }
+ l.LocationProperties = &locationProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ l.Name = &name
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ l.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ l.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// LocationListResult the paginated list of peering locations.
+type LocationListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of peering locations.
+ Value *[]Location `json:"value,omitempty"`
+ // NextLink - The link to fetch the next page of peering locations.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// LocationListResultIterator provides access to a complete listing of Location values.
+type LocationListResultIterator struct {
+ i int
+ page LocationListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *LocationListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocationListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *LocationListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter LocationListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter LocationListResultIterator) Response() LocationListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter LocationListResultIterator) Value() Location {
+ if !iter.page.NotDone() {
+ return Location{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewLocationListResultIterator creates a new instance of the LocationListResultIterator type.
+func NewLocationListResultIterator(page LocationListResultPage) LocationListResultIterator {
+ return LocationListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the LocationListResult contains no values.
+func (llr LocationListResult) IsEmpty() bool {
+ return llr.Value == nil || len(*llr.Value) == 0
+}
+
+// locationListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (llr LocationListResult) locationListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if llr.NextLink == nil || len(to.String(llr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(llr.NextLink)))
+}
+
+// LocationListResultPage contains a page of Location values.
+type LocationListResultPage struct {
+ fn func(context.Context, LocationListResult) (LocationListResult, error)
+ llr LocationListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *LocationListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LocationListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.llr)
+ if err != nil {
+ return err
+ }
+ page.llr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *LocationListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page LocationListResultPage) NotDone() bool {
+ return !page.llr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page LocationListResultPage) Response() LocationListResult {
+ return page.llr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page LocationListResultPage) Values() []Location {
+ if page.llr.IsEmpty() {
+ return nil
+ }
+ return *page.llr.Value
+}
+
+// NewLocationListResultPage creates a new instance of the LocationListResultPage type.
+func NewLocationListResultPage(getNextPage func(context.Context, LocationListResult) (LocationListResult, error)) LocationListResultPage {
+ return LocationListResultPage{fn: getNextPage}
+}
+
+// LocationProperties the properties that define a peering location.
+type LocationProperties struct {
+ // Direct - The properties that define a direct peering location.
+ Direct *LocationPropertiesDirect `json:"direct,omitempty"`
+ // Exchange - The properties that define an exchange peering location.
+ Exchange *LocationPropertiesExchange `json:"exchange,omitempty"`
+ // PeeringLocation - The name of the peering location.
+ PeeringLocation *string `json:"peeringLocation,omitempty"`
+ // Country - The country in which the peering location exists.
+ Country *string `json:"country,omitempty"`
+ // AzureRegion - The Azure region associated with the peering location.
+ AzureRegion *string `json:"azureRegion,omitempty"`
+}
+
+// LocationPropertiesDirect the properties that define a direct peering location.
+type LocationPropertiesDirect struct {
+ // PeeringFacilities - The list of direct peering facilities at the peering location.
+ PeeringFacilities *[]DirectPeeringFacility `json:"peeringFacilities,omitempty"`
+ // BandwidthOffers - The list of bandwidth offers available at the peering location.
+ BandwidthOffers *[]BandwidthOffer `json:"bandwidthOffers,omitempty"`
+}
+
+// LocationPropertiesExchange the properties that define an exchange peering location.
+type LocationPropertiesExchange struct {
+ // PeeringFacilities - The list of exchange peering facilities at the peering location.
+ PeeringFacilities *[]ExchangePeeringFacility `json:"peeringFacilities,omitempty"`
+}
+
+// Model peering is a logical representation of a set of connections to the Microsoft Cloud Edge at a
+// location.
+type Model struct {
+ autorest.Response `json:"-"`
+ // Sku - The SKU that defines the tier and kind of the peering.
+ Sku *Sku `json:"sku,omitempty"`
+ // Kind - The kind of the peering. Possible values include: 'KindDirect', 'KindExchange'
+ Kind Kind `json:"kind,omitempty"`
+ // Properties - The properties that define a peering.
+ *Properties `json:"properties,omitempty"`
+ // Location - The location of the resource.
+ Location *string `json:"location,omitempty"`
+ // Tags - The resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Model.
+func (mVar Model) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mVar.Sku != nil {
+ objectMap["sku"] = mVar.Sku
+ }
+ if mVar.Kind != "" {
+ objectMap["kind"] = mVar.Kind
+ }
+ if mVar.Properties != nil {
+ objectMap["properties"] = mVar.Properties
+ }
+ if mVar.Location != nil {
+ objectMap["location"] = mVar.Location
+ }
+ if mVar.Tags != nil {
+ objectMap["tags"] = mVar.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Model struct.
+func (mVar *Model) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "sku":
+ if v != nil {
+ var sku Sku
+ err = json.Unmarshal(*v, &sku)
+ if err != nil {
+ return err
+ }
+ mVar.Sku = &sku
+ }
+ case "kind":
+ if v != nil {
+ var kind Kind
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ mVar.Kind = kind
+ }
+ case "properties":
+ if v != nil {
+ var properties Properties
+ err = json.Unmarshal(*v, &properties)
+ if err != nil {
+ return err
+ }
+ mVar.Properties = &properties
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ mVar.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ mVar.Tags = tags
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mVar.Name = &name
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ mVar.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ mVar.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
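+
+// exampleModelJSON is a hypothetical sketch, not part of the generated API,
+// illustrating that the custom marshaler flattens the embedded *Properties
+// under the top-level "properties" key and omits the READ-ONLY name, id and
+// type fields; the peering location name below is a placeholder value.
+func exampleModelJSON() ([]byte, error) {
+	m := Model{
+		Kind: KindDirect,
+		Properties: &Properties{
+			PeeringLocation: to.StringPtr("Amsterdam"),
+		},
+	}
+	// Expected output: {"kind":"Direct","properties":{"peeringLocation":"Amsterdam"}}
+	return json.Marshal(m)
+}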
+
+// Operation the peering API operation.
+type Operation struct {
+ // Name - READ-ONLY; The name of the operation.
+ Name *string `json:"name,omitempty"`
+ // Display - READ-ONLY; The information related to the operation.
+ Display *OperationDisplayInfo `json:"display,omitempty"`
+ // IsDataAction - READ-ONLY; The flag that indicates whether the operation applies to data plane.
+ IsDataAction *bool `json:"isDataAction,omitempty"`
+}
+
+// OperationDisplayInfo the information related to the operation.
+type OperationDisplayInfo struct {
+ // Provider - READ-ONLY; The name of the resource provider.
+ Provider *string `json:"provider,omitempty"`
+ // Resource - READ-ONLY; The type of the resource.
+ Resource *string `json:"resource,omitempty"`
+ // Operation - READ-ONLY; The name of the operation.
+ Operation *string `json:"operation,omitempty"`
+ // Description - READ-ONLY; The description of the operation.
+ Description *string `json:"description,omitempty"`
+}
+
+// OperationListResult the paginated list of peering API operations.
+type OperationListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of peering API operations.
+ Value *[]Operation `json:"value,omitempty"`
+ // NextLink - The link to fetch the next page of peering API operations.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// OperationListResultIterator provides access to a complete listing of Operation values.
+type OperationListResultIterator struct {
+ i int
+ page OperationListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *OperationListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *OperationListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter OperationListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter OperationListResultIterator) Response() OperationListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter OperationListResultIterator) Value() Operation {
+ if !iter.page.NotDone() {
+ return Operation{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewOperationListResultIterator creates a new instance of the OperationListResultIterator type.
+func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
+ return OperationListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the OperationListResult contains no values.
+func (olr OperationListResult) IsEmpty() bool {
+ return olr.Value == nil || len(*olr.Value) == 0
+}
+
+// operationListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (olr OperationListResult) operationListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if olr.NextLink == nil || len(to.String(olr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(olr.NextLink)))
+}
+
+// OperationListResultPage contains a page of Operation values.
+type OperationListResultPage struct {
+ fn func(context.Context, OperationListResult) (OperationListResult, error)
+ olr OperationListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *OperationListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.olr)
+ if err != nil {
+ return err
+ }
+ page.olr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *OperationListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page OperationListResultPage) NotDone() bool {
+ return !page.olr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page OperationListResultPage) Response() OperationListResult {
+ return page.olr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page OperationListResultPage) Values() []Operation {
+ if page.olr.IsEmpty() {
+ return nil
+ }
+ return *page.olr.Value
+}
+
+// NewOperationListResultPage creates a new instance of the OperationListResultPage type.
+func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
+ return OperationListResultPage{fn: getNextPage}
+}
+
+// PeerAsn the essential information related to the peer's ASN.
+type PeerAsn struct {
+ autorest.Response `json:"-"`
+ // PeerAsnProperties - The properties that define a peer's ASN.
+ *PeerAsnProperties `json:"properties,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for PeerAsn.
+func (pa PeerAsn) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if pa.PeerAsnProperties != nil {
+ objectMap["properties"] = pa.PeerAsnProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for PeerAsn struct.
+func (pa *PeerAsn) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var peerAsnProperties PeerAsnProperties
+ err = json.Unmarshal(*v, &peerAsnProperties)
+ if err != nil {
+ return err
+ }
+ pa.PeerAsnProperties = &peerAsnProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ pa.Name = &name
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ pa.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ pa.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// PeerAsnListResult the paginated list of peer ASNs.
+type PeerAsnListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of peer ASNs.
+ Value *[]PeerAsn `json:"value,omitempty"`
+ // NextLink - The link to fetch the next page of peer ASNs.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// PeerAsnListResultIterator provides access to a complete listing of PeerAsn values.
+type PeerAsnListResultIterator struct {
+ i int
+ page PeerAsnListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *PeerAsnListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeerAsnListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *PeerAsnListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter PeerAsnListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter PeerAsnListResultIterator) Response() PeerAsnListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter PeerAsnListResultIterator) Value() PeerAsn {
+ if !iter.page.NotDone() {
+ return PeerAsn{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewPeerAsnListResultIterator creates a new instance of the PeerAsnListResultIterator type.
+func NewPeerAsnListResultIterator(page PeerAsnListResultPage) PeerAsnListResultIterator {
+ return PeerAsnListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the PeerAsnListResult contains no values.
+func (palr PeerAsnListResult) IsEmpty() bool {
+ return palr.Value == nil || len(*palr.Value) == 0
+}
+
+// peerAsnListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (palr PeerAsnListResult) peerAsnListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if palr.NextLink == nil || len(to.String(palr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(palr.NextLink)))
+}
+
+// PeerAsnListResultPage contains a page of PeerAsn values.
+type PeerAsnListResultPage struct {
+ fn func(context.Context, PeerAsnListResult) (PeerAsnListResult, error)
+ palr PeerAsnListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *PeerAsnListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeerAsnListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.palr)
+ if err != nil {
+ return err
+ }
+ page.palr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *PeerAsnListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page PeerAsnListResultPage) NotDone() bool {
+ return !page.palr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page PeerAsnListResultPage) Response() PeerAsnListResult {
+ return page.palr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page PeerAsnListResultPage) Values() []PeerAsn {
+ if page.palr.IsEmpty() {
+ return nil
+ }
+ return *page.palr.Value
+}
+
+// NewPeerAsnListResultPage creates a new instance of the PeerAsnListResultPage type.
+func NewPeerAsnListResultPage(getNextPage func(context.Context, PeerAsnListResult) (PeerAsnListResult, error)) PeerAsnListResultPage {
+ return PeerAsnListResultPage{fn: getNextPage}
+}
+
+// PeerAsnProperties the properties that define a peer's ASN.
+type PeerAsnProperties struct {
+ // PeerAsn - The Autonomous System Number (ASN) of the peer.
+ PeerAsn *int32 `json:"peerAsn,omitempty"`
+ // PeerContactInfo - The contact information of the peer.
+ PeerContactInfo *ContactInfo `json:"peerContactInfo,omitempty"`
+ // PeerName - The name of the peer.
+ PeerName *string `json:"peerName,omitempty"`
+ // ValidationState - The validation state of the ASN associated with the peer. Possible values include: 'ValidationStateNone', 'ValidationStatePending', 'ValidationStateApproved', 'ValidationStateFailed'
+ ValidationState ValidationState `json:"validationState,omitempty"`
+}
+
+// Properties the properties that define connectivity to the Microsoft Cloud Edge.
+type Properties struct {
+ // Direct - The properties that define a direct peering.
+ Direct *PropertiesDirect `json:"direct,omitempty"`
+ // Exchange - The properties that define an exchange peering.
+ Exchange *PropertiesExchange `json:"exchange,omitempty"`
+ // PeeringLocation - The location of the peering.
+ PeeringLocation *string `json:"peeringLocation,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state of the resource. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+}
+
+// PropertiesDirect the properties that define a direct peering.
+type PropertiesDirect struct {
+ // Connections - The set of connections that constitute a direct peering.
+ Connections *[]DirectConnection `json:"connections,omitempty"`
+ // UseForPeeringService - The flag that indicates whether or not the peering is used for peering service.
+ UseForPeeringService *bool `json:"useForPeeringService,omitempty"`
+ // PeerAsn - The reference of the peer ASN.
+ PeerAsn *SubResource `json:"peerAsn,omitempty"`
+ // DirectPeeringType - The type of direct peering. Possible values include: 'Edge', 'Transit', 'Cdn', 'Internal'
+ DirectPeeringType DirectPeeringType `json:"directPeeringType,omitempty"`
+}
+
+// PropertiesExchange the properties that define an exchange peering.
+type PropertiesExchange struct {
+ // Connections - The set of connections that constitute an exchange peering.
+ Connections *[]ExchangeConnection `json:"connections,omitempty"`
+ // PeerAsn - The reference of the peer ASN.
+ PeerAsn *SubResource `json:"peerAsn,omitempty"`
+}
+
+// Resource the ARM resource class.
+type Resource struct {
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+}
+
+// ResourceTags the resource tags.
+type ResourceTags struct {
+	// Tags - Gets or sets the tags, a dictionary of descriptors for the ARM object.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for ResourceTags.
+func (rt ResourceTags) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if rt.Tags != nil {
+ objectMap["tags"] = rt.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// Service a peering service resource.
+type Service struct {
+ autorest.Response `json:"-"`
+ // ServiceProperties - The properties that define a peering service.
+ *ServiceProperties `json:"properties,omitempty"`
+ // Location - The location of the resource.
+ Location *string `json:"location,omitempty"`
+ // Tags - The resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Service.
+func (s Service) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if s.ServiceProperties != nil {
+ objectMap["properties"] = s.ServiceProperties
+ }
+ if s.Location != nil {
+ objectMap["location"] = s.Location
+ }
+ if s.Tags != nil {
+ objectMap["tags"] = s.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Service struct.
+func (s *Service) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var serviceProperties ServiceProperties
+ err = json.Unmarshal(*v, &serviceProperties)
+ if err != nil {
+ return err
+ }
+ s.ServiceProperties = &serviceProperties
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ s.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ s.Tags = tags
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ s.Name = &name
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ s.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ s.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ServiceListResult the paginated list of peering services.
+type ServiceListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of peering services.
+ Value *[]Service `json:"value,omitempty"`
+ // NextLink - The link to fetch the next page of peering services.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ServiceListResultIterator provides access to a complete listing of Service values.
+type ServiceListResultIterator struct {
+ i int
+ page ServiceListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ServiceListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServiceListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ServiceListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ServiceListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ServiceListResultIterator) Response() ServiceListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ServiceListResultIterator) Value() Service {
+ if !iter.page.NotDone() {
+ return Service{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewServiceListResultIterator creates a new instance of the ServiceListResultIterator type.
+func NewServiceListResultIterator(page ServiceListResultPage) ServiceListResultIterator {
+ return ServiceListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ServiceListResult contains no values.
+func (slr ServiceListResult) IsEmpty() bool {
+ return slr.Value == nil || len(*slr.Value) == 0
+}
+
+// serviceListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (slr ServiceListResult) serviceListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if slr.NextLink == nil || len(to.String(slr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(slr.NextLink)))
+}
+
+// ServiceListResultPage contains a page of Service values.
+type ServiceListResultPage struct {
+ fn func(context.Context, ServiceListResult) (ServiceListResult, error)
+ slr ServiceListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ServiceListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServiceListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.slr)
+ if err != nil {
+ return err
+ }
+ page.slr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ServiceListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ServiceListResultPage) NotDone() bool {
+ return !page.slr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ServiceListResultPage) Response() ServiceListResult {
+ return page.slr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ServiceListResultPage) Values() []Service {
+ if page.slr.IsEmpty() {
+ return nil
+ }
+ return *page.slr.Value
+}
+
+// Creates a new instance of the ServiceListResultPage type.
+func NewServiceListResultPage(getNextPage func(context.Context, ServiceListResult) (ServiceListResult, error)) ServiceListResultPage {
+ return ServiceListResultPage{fn: getNextPage}
+}
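
All four pager/iterator pairs generated in this file share the same shape, so one consumption sketch covers them. It assumes the `ServiceListResultPage` comes from a client call such as `ServicesClient.ListByResourceGroup` (declared in `peeringapi/interfaces.go` later in this diff); otherwise it uses only the methods defined above.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
)

// printServicesByPage walks each page with NextWithContext until an empty page
// signals the end of the collection.
func printServicesByPage(ctx context.Context, page peering.ServiceListResultPage) error {
	for page.NotDone() {
		for _, svc := range page.Values() {
			fmt.Printf("%+v\n", svc)
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}

// printServicesByValue wraps the same page in the iterator, so callers see one
// value at a time and never deal with page boundaries themselves.
func printServicesByValue(ctx context.Context, page peering.ServiceListResultPage) error {
	for iter := peering.NewServiceListResultIterator(page); iter.NotDone(); {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```

The generated `*Complete` methods shown later in this diff (for example `OperationsClient.ListComplete`) return the iterator form directly, so callers rarely need to construct it by hand.
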
+
+// ServiceLocation the peering service location.
+type ServiceLocation struct {
+ // ServiceLocationProperties - The properties that define a peering service location.
+ *ServiceLocationProperties `json:"properties,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ServiceLocation.
+func (sl ServiceLocation) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if sl.ServiceLocationProperties != nil {
+ objectMap["properties"] = sl.ServiceLocationProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ServiceLocation struct.
+func (sl *ServiceLocation) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var serviceLocationProperties ServiceLocationProperties
+ err = json.Unmarshal(*v, &serviceLocationProperties)
+ if err != nil {
+ return err
+ }
+ sl.ServiceLocationProperties = &serviceLocationProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ sl.Name = &name
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ sl.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sl.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
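
The custom marshaler/unmarshaler pair above flattens the ARM `properties` envelope onto the struct and keeps the READ-ONLY `name`/`id`/`type` fields out of request bodies. A minimal round-trip sketch, with illustrative JSON values only:

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
)

func roundTrip() error {
	// Sample response body; the concrete values are placeholders.
	body := []byte(`{
		"name": "Washington",
		"id": "/providers/Microsoft.Peering/peeringServiceLocations/Washington",
		"type": "Microsoft.Peering/peeringServiceLocations",
		"properties": {"country": "United States", "state": "Washington", "azureRegion": "West US 2"}
	}`)

	var loc peering.ServiceLocation
	if err := json.Unmarshal(body, &loc); err != nil {
		return err
	}
	// The nested "properties" object has been flattened onto the struct.
	fmt.Println(*loc.State, *loc.AzureRegion)

	// MarshalJSON emits only "properties"; the READ-ONLY Name/ID/Type are omitted.
	out, err := json.Marshal(loc)
	if err != nil {
		return err
	}
	fmt.Println(string(out))
	return nil
}
```
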
+
+// ServiceLocationListResult the paginated list of peering service locations.
+type ServiceLocationListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of peering service locations.
+ Value *[]ServiceLocation `json:"value,omitempty"`
+ // NextLink - The link to fetch the next page of peering service locations.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ServiceLocationListResultIterator provides access to a complete listing of ServiceLocation values.
+type ServiceLocationListResultIterator struct {
+ i int
+ page ServiceLocationListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ServiceLocationListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServiceLocationListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ServiceLocationListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ServiceLocationListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ServiceLocationListResultIterator) Response() ServiceLocationListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ServiceLocationListResultIterator) Value() ServiceLocation {
+ if !iter.page.NotDone() {
+ return ServiceLocation{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ServiceLocationListResultIterator type.
+func NewServiceLocationListResultIterator(page ServiceLocationListResultPage) ServiceLocationListResultIterator {
+ return ServiceLocationListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (sllr ServiceLocationListResult) IsEmpty() bool {
+ return sllr.Value == nil || len(*sllr.Value) == 0
+}
+
+// serviceLocationListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (sllr ServiceLocationListResult) serviceLocationListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if sllr.NextLink == nil || len(to.String(sllr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(sllr.NextLink)))
+}
+
+// ServiceLocationListResultPage contains a page of ServiceLocation values.
+type ServiceLocationListResultPage struct {
+ fn func(context.Context, ServiceLocationListResult) (ServiceLocationListResult, error)
+ sllr ServiceLocationListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ServiceLocationListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServiceLocationListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.sllr)
+ if err != nil {
+ return err
+ }
+ page.sllr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ServiceLocationListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ServiceLocationListResultPage) NotDone() bool {
+ return !page.sllr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ServiceLocationListResultPage) Response() ServiceLocationListResult {
+ return page.sllr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ServiceLocationListResultPage) Values() []ServiceLocation {
+ if page.sllr.IsEmpty() {
+ return nil
+ }
+ return *page.sllr.Value
+}
+
+// Creates a new instance of the ServiceLocationListResultPage type.
+func NewServiceLocationListResultPage(getNextPage func(context.Context, ServiceLocationListResult) (ServiceLocationListResult, error)) ServiceLocationListResultPage {
+ return ServiceLocationListResultPage{fn: getNextPage}
+}
+
+// ServiceLocationProperties the properties that define connectivity to the Peering Service Location.
+type ServiceLocationProperties struct {
+ // Country - Country of the customer
+ Country *string `json:"country,omitempty"`
+ // State - State of the customer
+ State *string `json:"state,omitempty"`
+ // AzureRegion - Azure region for the location
+ AzureRegion *string `json:"azureRegion,omitempty"`
+}
+
+// ServicePrefix the peering service prefix class.
+type ServicePrefix struct {
+ autorest.Response `json:"-"`
+ // ServicePrefixProperties - Gets or sets the peering prefix properties.
+ *ServicePrefixProperties `json:"properties,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ServicePrefix.
+func (sp ServicePrefix) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if sp.ServicePrefixProperties != nil {
+ objectMap["properties"] = sp.ServicePrefixProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ServicePrefix struct.
+func (sp *ServicePrefix) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var servicePrefixProperties ServicePrefixProperties
+ err = json.Unmarshal(*v, &servicePrefixProperties)
+ if err != nil {
+ return err
+ }
+ sp.ServicePrefixProperties = &servicePrefixProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ sp.Name = &name
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ sp.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ServicePrefixListResult the paginated list of peering service prefixes.
+type ServicePrefixListResult struct {
+ autorest.Response `json:"-"`
+	// Value - The list of peering service prefixes.
+ Value *[]ServicePrefix `json:"value,omitempty"`
+	// NextLink - The link to fetch the next page of peering service prefixes.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ServicePrefixListResultIterator provides access to a complete listing of ServicePrefix values.
+type ServicePrefixListResultIterator struct {
+ i int
+ page ServicePrefixListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ServicePrefixListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicePrefixListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ServicePrefixListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ServicePrefixListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ServicePrefixListResultIterator) Response() ServicePrefixListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ServicePrefixListResultIterator) Value() ServicePrefix {
+ if !iter.page.NotDone() {
+ return ServicePrefix{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ServicePrefixListResultIterator type.
+func NewServicePrefixListResultIterator(page ServicePrefixListResultPage) ServicePrefixListResultIterator {
+ return ServicePrefixListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (splr ServicePrefixListResult) IsEmpty() bool {
+ return splr.Value == nil || len(*splr.Value) == 0
+}
+
+// servicePrefixListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (splr ServicePrefixListResult) servicePrefixListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if splr.NextLink == nil || len(to.String(splr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(splr.NextLink)))
+}
+
+// ServicePrefixListResultPage contains a page of ServicePrefix values.
+type ServicePrefixListResultPage struct {
+ fn func(context.Context, ServicePrefixListResult) (ServicePrefixListResult, error)
+ splr ServicePrefixListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ServicePrefixListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicePrefixListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.splr)
+ if err != nil {
+ return err
+ }
+ page.splr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ServicePrefixListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ServicePrefixListResultPage) NotDone() bool {
+ return !page.splr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ServicePrefixListResultPage) Response() ServicePrefixListResult {
+ return page.splr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ServicePrefixListResultPage) Values() []ServicePrefix {
+ if page.splr.IsEmpty() {
+ return nil
+ }
+ return *page.splr.Value
+}
+
+// Creates a new instance of the ServicePrefixListResultPage type.
+func NewServicePrefixListResultPage(getNextPage func(context.Context, ServicePrefixListResult) (ServicePrefixListResult, error)) ServicePrefixListResultPage {
+ return ServicePrefixListResultPage{fn: getNextPage}
+}
+
+// ServicePrefixProperties the peering service prefix properties class.
+type ServicePrefixProperties struct {
+ // Prefix - Valid route prefix
+ Prefix *string `json:"prefix,omitempty"`
+ // PrefixValidationState - The prefix validation state. Possible values include: 'PrefixValidationStateNone', 'PrefixValidationStateInvalid', 'PrefixValidationStateVerified', 'PrefixValidationStateFailed', 'PrefixValidationStatePending', 'PrefixValidationStateUnknown'
+ PrefixValidationState PrefixValidationState `json:"prefixValidationState,omitempty"`
+ // LearnedType - The prefix learned type. Possible values include: 'LearnedTypeNone', 'LearnedTypeViaPartner', 'LearnedTypeViaSession'
+ LearnedType LearnedType `json:"learnedType,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state of the resource. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+}
+
+// ServiceProperties the properties that define connectivity to the Peering Service.
+type ServiceProperties struct {
+ // PeeringServiceLocation - The PeeringServiceLocation of the Customer.
+ PeeringServiceLocation *string `json:"peeringServiceLocation,omitempty"`
+ // PeeringServiceProvider - The MAPS Provider Name.
+ PeeringServiceProvider *string `json:"peeringServiceProvider,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state of the resource. Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+}
+
+// ServiceProvider the peering service provider.
+type ServiceProvider struct {
+ // ServiceProviderProperties - The properties that define a peering service provider.
+ *ServiceProviderProperties `json:"properties,omitempty"`
+ // Name - READ-ONLY; The name of the resource.
+ Name *string `json:"name,omitempty"`
+ // ID - READ-ONLY; The ID of the resource.
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; The type of the resource.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ServiceProvider.
+func (sp ServiceProvider) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if sp.ServiceProviderProperties != nil {
+ objectMap["properties"] = sp.ServiceProviderProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ServiceProvider struct.
+func (sp *ServiceProvider) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var serviceProviderProperties ServiceProviderProperties
+ err = json.Unmarshal(*v, &serviceProviderProperties)
+ if err != nil {
+ return err
+ }
+ sp.ServiceProviderProperties = &serviceProviderProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ sp.Name = &name
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ sp.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// ServiceProviderListResult the paginated list of peering service providers.
+type ServiceProviderListResult struct {
+ autorest.Response `json:"-"`
+ // Value - The list of peering service providers.
+ Value *[]ServiceProvider `json:"value,omitempty"`
+ // NextLink - The link to fetch the next page of peering service providers.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ServiceProviderListResultIterator provides access to a complete listing of ServiceProvider values.
+type ServiceProviderListResultIterator struct {
+ i int
+ page ServiceProviderListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ServiceProviderListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServiceProviderListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ServiceProviderListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ServiceProviderListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ServiceProviderListResultIterator) Response() ServiceProviderListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ServiceProviderListResultIterator) Value() ServiceProvider {
+ if !iter.page.NotDone() {
+ return ServiceProvider{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ServiceProviderListResultIterator type.
+func NewServiceProviderListResultIterator(page ServiceProviderListResultPage) ServiceProviderListResultIterator {
+ return ServiceProviderListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (splr ServiceProviderListResult) IsEmpty() bool {
+ return splr.Value == nil || len(*splr.Value) == 0
+}
+
+// serviceProviderListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (splr ServiceProviderListResult) serviceProviderListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if splr.NextLink == nil || len(to.String(splr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(splr.NextLink)))
+}
+
+// ServiceProviderListResultPage contains a page of ServiceProvider values.
+type ServiceProviderListResultPage struct {
+ fn func(context.Context, ServiceProviderListResult) (ServiceProviderListResult, error)
+ splr ServiceProviderListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ServiceProviderListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServiceProviderListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.splr)
+ if err != nil {
+ return err
+ }
+ page.splr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ServiceProviderListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ServiceProviderListResultPage) NotDone() bool {
+ return !page.splr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ServiceProviderListResultPage) Response() ServiceProviderListResult {
+ return page.splr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ServiceProviderListResultPage) Values() []ServiceProvider {
+ if page.splr.IsEmpty() {
+ return nil
+ }
+ return *page.splr.Value
+}
+
+// Creates a new instance of the ServiceProviderListResultPage type.
+func NewServiceProviderListResultPage(getNextPage func(context.Context, ServiceProviderListResult) (ServiceProviderListResult, error)) ServiceProviderListResultPage {
+ return ServiceProviderListResultPage{fn: getNextPage}
+}
+
+// ServiceProviderProperties the properties that define connectivity to the Peering Service Provider.
+type ServiceProviderProperties struct {
+ // ServiceProviderName - The name of the service provider.
+ ServiceProviderName *string `json:"serviceProviderName,omitempty"`
+}
+
+// Sku the SKU that defines the tier and kind of the peering.
+type Sku struct {
+ // Name - The name of the peering SKU. Possible values include: 'BasicExchangeFree', 'BasicDirectFree', 'PremiumDirectFree', 'PremiumExchangeMetered', 'PremiumDirectMetered', 'PremiumDirectUnlimited'
+ Name Name `json:"name,omitempty"`
+ // Tier - The tier of the peering SKU. Possible values include: 'Basic', 'Premium'
+ Tier Tier `json:"tier,omitempty"`
+ // Family - The family of the peering SKU. Possible values include: 'Direct', 'Exchange'
+ Family Family `json:"family,omitempty"`
+ // Size - The size of the peering SKU. Possible values include: 'Free', 'Metered', 'Unlimited'
+ Size Size `json:"size,omitempty"`
+}
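
`Sku` is a plain value struct whose fields use the enum types listed in the comments. A minimal sketch of assembling one, assuming the package exposes the enum constants under the names quoted in those comments (`BasicDirectFree`, `Basic`, `Direct`, `Free`):

```go
package example

import "github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"

// basicDirectFreeSku assembles the SKU for a free Basic Direct peering,
// using constant names taken from the enum lists in the field comments above.
func basicDirectFreeSku() peering.Sku {
	return peering.Sku{
		Name:   peering.BasicDirectFree,
		Tier:   peering.Basic,
		Family: peering.Direct,
		Size:   peering.Free,
	}
}
```
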
+
+// String a wrapper for a string response value.
+type String struct {
+ autorest.Response `json:"-"`
+ // Value - Possible values include: 'Available', 'UnAvailable'
+ Value *string `json:"value,omitempty"`
+}
+
+// SubResource the sub resource.
+type SubResource struct {
+ // ID - The identifier of the referenced resource.
+ ID *string `json:"id,omitempty"`
+}
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/operations.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/operations.go
new file mode 100644
index 000000000000..63f06a85cf05
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/operations.go
@@ -0,0 +1,147 @@
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// OperationsClient is the peering Client
+type OperationsClient struct {
+ BaseClient
+}
+
+// NewOperationsClient creates an instance of the OperationsClient client.
+func NewOperationsClient(subscriptionID string) OperationsClient {
+ return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+ return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists all of the available API operations for peering resources.
+func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.olr.Response.Response != nil {
+ sc = result.olr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.OperationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.olr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.OperationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.olr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.OperationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Peering/operations"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationListResult) (result OperationListResult, err error) {
+ req, err := lastResults.operationListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "peering.OperationsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "peering.OperationsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.OperationsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
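
A short usage sketch for the client above. `ListComplete` returns an iterator that crosses page boundaries automatically; its methods mirror the iterators shown earlier in this diff. The subscription ID is a placeholder, and the client's `Authorizer` is assumed to be populated with valid credentials before the call.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
)

func listOperations(ctx context.Context) error {
	client := peering.NewOperationsClient("<subscription-id>")
	// client.Authorizer must be set with valid credentials before calling (not shown).

	iter, err := client.ListComplete(ctx)
	if err != nil {
		return err
	}
	// Walk every available operation, one value at a time.
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```
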
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/peerasns.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/peerasns.go
new file mode 100644
index 000000000000..475bbf87f835
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/peerasns.go
@@ -0,0 +1,379 @@
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// PeerAsnsClient is the peering Client
+type PeerAsnsClient struct {
+ BaseClient
+}
+
+// NewPeerAsnsClient creates an instance of the PeerAsnsClient client.
+func NewPeerAsnsClient(subscriptionID string) PeerAsnsClient {
+ return NewPeerAsnsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPeerAsnsClientWithBaseURI creates an instance of the PeerAsnsClient client.
+func NewPeerAsnsClientWithBaseURI(baseURI string, subscriptionID string) PeerAsnsClient {
+ return PeerAsnsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a new peer ASN or updates an existing peer ASN with the specified name under the given
+// subscription.
+// Parameters:
+// peerAsnName - the peer ASN name.
+// peerAsn - the peer ASN.
+func (client PeerAsnsClient) CreateOrUpdate(ctx context.Context, peerAsnName string, peerAsn PeerAsn) (result PeerAsn, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeerAsnsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdatePreparer(ctx, peerAsnName, peerAsn)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client PeerAsnsClient) CreateOrUpdatePreparer(ctx context.Context, peerAsnName string, peerAsn PeerAsn) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peerAsnName": autorest.Encode("path", peerAsnName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/peerAsns/{peerAsnName}", pathParameters),
+ autorest.WithJSON(peerAsn),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeerAsnsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client PeerAsnsClient) CreateOrUpdateResponder(resp *http.Response) (result PeerAsn, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes an existing peer ASN with the specified name under the given subscription.
+// Parameters:
+// peerAsnName - the peer ASN name.
+func (client PeerAsnsClient) Delete(ctx context.Context, peerAsnName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeerAsnsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, peerAsnName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PeerAsnsClient) DeletePreparer(ctx context.Context, peerAsnName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peerAsnName": autorest.Encode("path", peerAsnName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/peerAsns/{peerAsnName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeerAsnsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PeerAsnsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the peer ASN with the specified name under the given subscription.
+// Parameters:
+// peerAsnName - the peer ASN name.
+func (client PeerAsnsClient) Get(ctx context.Context, peerAsnName string) (result PeerAsn, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeerAsnsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, peerAsnName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client PeerAsnsClient) GetPreparer(ctx context.Context, peerAsnName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peerAsnName": autorest.Encode("path", peerAsnName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/peerAsns/{peerAsnName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeerAsnsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PeerAsnsClient) GetResponder(resp *http.Response) (result PeerAsn, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListBySubscription lists all of the peer ASNs under the given subscription.
+func (client PeerAsnsClient) ListBySubscription(ctx context.Context) (result PeerAsnListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeerAsnsClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.palr.Response.Response != nil {
+ sc = result.palr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listBySubscriptionNextResults
+ req, err := client.ListBySubscriptionPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "ListBySubscription", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.palr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "ListBySubscription", resp, "Failure sending request")
+ return
+ }
+
+ result.palr, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "ListBySubscription", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListBySubscriptionPreparer prepares the ListBySubscription request.
+func (client PeerAsnsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/peerAsns", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeerAsnsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
+// closes the http.Response Body.
+func (client PeerAsnsClient) ListBySubscriptionResponder(resp *http.Response) (result PeerAsnListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listBySubscriptionNextResults retrieves the next set of results, if any.
+func (client PeerAsnsClient) listBySubscriptionNextResults(ctx context.Context, lastResults PeerAsnListResult) (result PeerAsnListResult, err error) {
+ req, err := lastResults.peerAsnListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeerAsnsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
+func (client PeerAsnsClient) ListBySubscriptionComplete(ctx context.Context) (result PeerAsnListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeerAsnsClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListBySubscription(ctx)
+ return
+}
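
A brief sketch of driving the `PeerAsnsClient` defined above. The subscription ID and ASN name are placeholders, an `Authorizer` is assumed to be set on the client, and `ListBySubscriptionComplete` walks every page for the caller.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
)

func showPeerAsns(ctx context.Context) error {
	client := peering.NewPeerAsnsClient("<subscription-id>")
	// client.Authorizer must be set with valid credentials before calling (not shown).

	// Fetch a single peer ASN by name.
	asn, err := client.Get(ctx, "<peer-asn-name>")
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", asn)

	// Enumerate every peer ASN in the subscription, crossing pages automatically.
	iter, err := client.ListBySubscriptionComplete(ctx)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```
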
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/peeringapi/interfaces.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/peeringapi/interfaces.go
new file mode 100644
index 000000000000..f3c448884e9d
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/peeringapi/interfaces.go
@@ -0,0 +1,116 @@
+package peeringapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// BaseClientAPI contains the set of methods on the BaseClient type.
+type BaseClientAPI interface {
+ CheckServiceProviderAvailability(ctx context.Context, checkServiceProviderAvailabilityInput peering.CheckServiceProviderAvailabilityInput) (result peering.String, err error)
+}
+
+var _ BaseClientAPI = (*peering.BaseClient)(nil)
+
+// LegacyPeeringsClientAPI contains the set of methods on the LegacyPeeringsClient type.
+type LegacyPeeringsClientAPI interface {
+ List(ctx context.Context, peeringLocation string, kind string) (result peering.ListResultPage, err error)
+}
+
+var _ LegacyPeeringsClientAPI = (*peering.LegacyPeeringsClient)(nil)
+
+// OperationsClientAPI contains the set of methods on the OperationsClient type.
+type OperationsClientAPI interface {
+ List(ctx context.Context) (result peering.OperationListResultPage, err error)
+}
+
+var _ OperationsClientAPI = (*peering.OperationsClient)(nil)
+
+// PeerAsnsClientAPI contains the set of methods on the PeerAsnsClient type.
+type PeerAsnsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, peerAsnName string, peerAsn peering.PeerAsn) (result peering.PeerAsn, err error)
+ Delete(ctx context.Context, peerAsnName string) (result autorest.Response, err error)
+ Get(ctx context.Context, peerAsnName string) (result peering.PeerAsn, err error)
+ ListBySubscription(ctx context.Context) (result peering.PeerAsnListResultPage, err error)
+}
+
+var _ PeerAsnsClientAPI = (*peering.PeerAsnsClient)(nil)
+
+// LocationsClientAPI contains the set of methods on the LocationsClient type.
+type LocationsClientAPI interface {
+ List(ctx context.Context, kind string, directPeeringType string) (result peering.LocationListResultPage, err error)
+}
+
+var _ LocationsClientAPI = (*peering.LocationsClient)(nil)
+
+// PeeringsClientAPI contains the set of methods on the PeeringsClient type.
+type PeeringsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, peeringName string, peering peering.Model) (result peering.Model, err error)
+ Delete(ctx context.Context, resourceGroupName string, peeringName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, peeringName string) (result peering.Model, err error)
+ ListByResourceGroup(ctx context.Context, resourceGroupName string) (result peering.ListResultPage, err error)
+ ListBySubscription(ctx context.Context) (result peering.ListResultPage, err error)
+ Update(ctx context.Context, resourceGroupName string, peeringName string, tags peering.ResourceTags) (result peering.Model, err error)
+}
+
+var _ PeeringsClientAPI = (*peering.PeeringsClient)(nil)
+
+// ServiceLocationsClientAPI contains the set of methods on the ServiceLocationsClient type.
+type ServiceLocationsClientAPI interface {
+ List(ctx context.Context) (result peering.ServiceLocationListResultPage, err error)
+}
+
+var _ ServiceLocationsClientAPI = (*peering.ServiceLocationsClient)(nil)
+
+// ServicePrefixesClientAPI contains the set of methods on the ServicePrefixesClient type.
+type ServicePrefixesClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string, peeringServicePrefix peering.ServicePrefix) (result peering.ServicePrefix, err error)
+ Delete(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string) (result peering.ServicePrefix, err error)
+}
+
+var _ ServicePrefixesClientAPI = (*peering.ServicePrefixesClient)(nil)
+
+// PrefixesClientAPI contains the set of methods on the PrefixesClient type.
+type PrefixesClientAPI interface {
+ ListByPeeringService(ctx context.Context, resourceGroupName string, peeringServiceName string) (result peering.ServicePrefixListResultPage, err error)
+}
+
+var _ PrefixesClientAPI = (*peering.PrefixesClient)(nil)
+
+// ServiceProvidersClientAPI contains the set of methods on the ServiceProvidersClient type.
+type ServiceProvidersClientAPI interface {
+ List(ctx context.Context) (result peering.ServiceProviderListResultPage, err error)
+}
+
+var _ ServiceProvidersClientAPI = (*peering.ServiceProvidersClient)(nil)
+
+// ServicesClientAPI contains the set of methods on the ServicesClient type.
+type ServicesClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, peeringServiceName string, peeringService peering.Service) (result peering.Service, err error)
+ Delete(ctx context.Context, resourceGroupName string, peeringServiceName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, peeringServiceName string) (result peering.Service, err error)
+ ListByResourceGroup(ctx context.Context, resourceGroupName string) (result peering.ServiceListResultPage, err error)
+ ListBySubscription(ctx context.Context) (result peering.ServiceListResultPage, err error)
+ Update(ctx context.Context, resourceGroupName string, peeringServiceName string, tags peering.ResourceTags) (result peering.Service, err error)
+}
+
+var _ ServicesClientAPI = (*peering.ServicesClient)(nil)
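
These interfaces exist so that code consuming the SDK can be written against an abstraction and exercised with test doubles instead of live clients. A compact sketch using `BaseClientAPI`; the fake below is hypothetical test scaffolding, and the `"Available"` comparison follows the possible values documented on `peering.String`.

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering/peeringapi"
)

// providerIsAvailable depends on the interface, not on the concrete BaseClient.
func providerIsAvailable(ctx context.Context, c peeringapi.BaseClientAPI, input peering.CheckServiceProviderAvailabilityInput) (bool, error) {
	result, err := c.CheckServiceProviderAvailability(ctx, input)
	if err != nil {
		return false, err
	}
	return result.Value != nil && *result.Value == "Available", nil
}

// fakeBaseClient is a hypothetical test double that always reports availability.
type fakeBaseClient struct{}

func (fakeBaseClient) CheckServiceProviderAvailability(ctx context.Context, in peering.CheckServiceProviderAvailabilityInput) (peering.String, error) {
	v := "Available"
	return peering.String{Value: &v}, nil
}

// Compile-time check, in the same style as the assertions in interfaces.go.
var _ peeringapi.BaseClientAPI = fakeBaseClient{}
```
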
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/peerings.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/peerings.go
new file mode 100644
index 000000000000..06e815d59656
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/peerings.go
@@ -0,0 +1,586 @@
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// PeeringsClient is the peering Client
+type PeeringsClient struct {
+ BaseClient
+}
+
+// NewPeeringsClient creates an instance of the PeeringsClient client.
+func NewPeeringsClient(subscriptionID string) PeeringsClient {
+ return NewPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPeeringsClientWithBaseURI creates an instance of the PeeringsClient client.
+func NewPeeringsClientWithBaseURI(baseURI string, subscriptionID string) PeeringsClient {
+ return PeeringsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a new peering or updates an existing peering with the specified name under the given
+// subscription and resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// peeringName - the name of the peering.
+// peering - the properties needed to create or update a peering.
+func (client PeeringsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, peeringName string, peering Model) (result Model, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: peering,
+ Constraints: []validation.Constraint{{Target: "peering.Sku", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "peering.Location", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("peering.PeeringsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, peeringName, peering)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client PeeringsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, peeringName string, peering Model) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringName": autorest.Encode("path", peeringName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peerings/{peeringName}", pathParameters),
+ autorest.WithJSON(peering),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client PeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result Model, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
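The Validate call in CreateOrUpdate above enforces the documented constraints (a non-nil Sku and Location) before any HTTP request is prepared, so an invalid Model fails fast on the client. A minimal sketch of that local check; the subscription ID and resource names are placeholders, and no Authorizer is needed because the request never leaves the process:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
)

func main() {
	// No Authorizer is set: the call below fails client-side validation and
	// never prepares or sends an HTTP request.
	client := peering.NewPeeringsClient("<subscription-id>") // placeholder

	// Sku and Location are nil, so CreateOrUpdate returns a validation error
	// produced by validation.NewError instead of contacting the service.
	_, err := client.CreateOrUpdate(context.Background(), "example-rg", "example-peering", peering.Model{})
	fmt.Println(err)
}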
+
+// Delete deletes an existing peering with the specified name under the given subscription and resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// peeringName - the name of the peering.
+func (client PeeringsClient) Delete(ctx context.Context, resourceGroupName string, peeringName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, peeringName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PeeringsClient) DeletePreparer(ctx context.Context, resourceGroupName string, peeringName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringName": autorest.Encode("path", peeringName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peerings/{peeringName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PeeringsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets an existing peering with the specified name under the given subscription and resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// peeringName - the name of the peering.
+func (client PeeringsClient) Get(ctx context.Context, resourceGroupName string, peeringName string) (result Model, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, peeringName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client PeeringsClient) GetPreparer(ctx context.Context, resourceGroupName string, peeringName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringName": autorest.Encode("path", peeringName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peerings/{peeringName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeeringsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PeeringsClient) GetResponder(resp *http.Response) (result Model, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByResourceGroup lists all of the peerings under the given subscription and resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+func (client PeeringsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.lr.Response.Response != nil {
+ sc = result.lr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByResourceGroupNextResults
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.lr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.lr, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client PeeringsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peerings", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeeringsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client PeeringsClient) ListByResourceGroupResponder(resp *http.Response) (result ListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client PeeringsClient) listByResourceGroupNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) {
+ req, err := lastResults.listResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "peering.PeeringsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "peering.PeeringsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client PeeringsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
+ return
+}
+
+// ListBySubscription lists all of the peerings under the given subscription.
+func (client PeeringsClient) ListBySubscription(ctx context.Context) (result ListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringsClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.lr.Response.Response != nil {
+ sc = result.lr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listBySubscriptionNextResults
+ req, err := client.ListBySubscriptionPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "ListBySubscription", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.lr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "ListBySubscription", resp, "Failure sending request")
+ return
+ }
+
+ result.lr, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "ListBySubscription", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListBySubscriptionPreparer prepares the ListBySubscription request.
+func (client PeeringsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/peerings", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeeringsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
+// closes the http.Response Body.
+func (client PeeringsClient) ListBySubscriptionResponder(resp *http.Response) (result ListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listBySubscriptionNextResults retrieves the next set of results, if any.
+func (client PeeringsClient) listBySubscriptionNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) {
+ req, err := lastResults.listResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "peering.PeeringsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "peering.PeeringsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
+func (client PeeringsClient) ListBySubscriptionComplete(ctx context.Context) (result ListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringsClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListBySubscription(ctx)
+ return
+}
+
+// Update updates tags for a peering with the specified name under the given subscription and resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// peeringName - the name of the peering.
+// tags - the resource tags.
+func (client PeeringsClient) Update(ctx context.Context, resourceGroupName string, peeringName string, tags ResourceTags) (result Model, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PeeringsClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, peeringName, tags)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PeeringsClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client PeeringsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, peeringName string, tags ResourceTags) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringName": autorest.Encode("path", peeringName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peerings/{peeringName}", pathParameters),
+ autorest.WithJSON(tags),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client PeeringsClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client PeeringsClient) UpdateResponder(resp *http.Response) (result Model, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
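The PeeringsClient above follows the standard autorest layout: each operation exposes a public method plus Preparer/Sender/Responder hooks, and the paged List operations come with *Complete iterators that follow nextLink paging automatically. A usage sketch, assuming credentials are supplied through the autorest/azure/auth environment helper and that the generated Model exposes the usual Name field; the subscription, resource group, and peering names are placeholders:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment() // reads AZURE_TENANT_ID, AZURE_CLIENT_ID, etc.
	if err != nil {
		panic(err)
	}

	client := peering.NewPeeringsClient("<subscription-id>")
	client.Authorizer = authorizer

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// Fetch a single peering.
	p, err := client.Get(ctx, "example-rg", "example-peering")
	if err != nil {
		panic(err)
	}
	if p.Name != nil {
		fmt.Println("got peering:", *p.Name)
	}

	// Enumerate every peering in the subscription; the Complete iterator
	// follows the paging handled by listBySubscriptionNextResults.
	it, err := client.ListBySubscriptionComplete(ctx)
	if err != nil {
		panic(err)
	}
	for it.NotDone() {
		if name := it.Value().Name; name != nil {
			fmt.Println(*name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			panic(err)
		}
	}
}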
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/prefixes.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/prefixes.go
new file mode 100644
index 000000000000..449f7153a44b
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/prefixes.go
@@ -0,0 +1,156 @@
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// PrefixesClient is the peering Client
+type PrefixesClient struct {
+ BaseClient
+}
+
+// NewPrefixesClient creates an instance of the PrefixesClient client.
+func NewPrefixesClient(subscriptionID string) PrefixesClient {
+ return NewPrefixesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewPrefixesClientWithBaseURI creates an instance of the PrefixesClient client.
+func NewPrefixesClientWithBaseURI(baseURI string, subscriptionID string) PrefixesClient {
+ return PrefixesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// ListByPeeringService lists the peering service prefixes in the resource group.
+// Parameters:
+// resourceGroupName - the resource group name.
+// peeringServiceName - the peering service name.
+func (client PrefixesClient) ListByPeeringService(ctx context.Context, resourceGroupName string, peeringServiceName string) (result ServicePrefixListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PrefixesClient.ListByPeeringService")
+ defer func() {
+ sc := -1
+ if result.splr.Response.Response != nil {
+ sc = result.splr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByPeeringServiceNextResults
+ req, err := client.ListByPeeringServicePreparer(ctx, resourceGroupName, peeringServiceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "ListByPeeringService", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByPeeringServiceSender(req)
+ if err != nil {
+ result.splr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "ListByPeeringService", resp, "Failure sending request")
+ return
+ }
+
+ result.splr, err = client.ListByPeeringServiceResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "ListByPeeringService", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByPeeringServicePreparer prepares the ListByPeeringService request.
+func (client PrefixesClient) ListByPeeringServicePreparer(ctx context.Context, resourceGroupName string, peeringServiceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringServiceName": autorest.Encode("path", peeringServiceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByPeeringServiceSender sends the ListByPeeringService request. The method will close the
+// http.Response Body if it receives an error.
+func (client PrefixesClient) ListByPeeringServiceSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByPeeringServiceResponder handles the response to the ListByPeeringService request. The method always
+// closes the http.Response Body.
+func (client PrefixesClient) ListByPeeringServiceResponder(resp *http.Response) (result ServicePrefixListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByPeeringServiceNextResults retrieves the next set of results, if any.
+func (client PrefixesClient) listByPeeringServiceNextResults(ctx context.Context, lastResults ServicePrefixListResult) (result ServicePrefixListResult, err error) {
+ req, err := lastResults.servicePrefixListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "peering.PrefixesClient", "listByPeeringServiceNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByPeeringServiceSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "peering.PrefixesClient", "listByPeeringServiceNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByPeeringServiceResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.PrefixesClient", "listByPeeringServiceNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByPeeringServiceComplete enumerates all values, automatically crossing page boundaries as required.
+func (client PrefixesClient) ListByPeeringServiceComplete(ctx context.Context, resourceGroupName string, peeringServiceName string) (result ServicePrefixListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PrefixesClient.ListByPeeringService")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByPeeringService(ctx, resourceGroupName, peeringServiceName)
+ return
+}
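A paging sketch for the client above; it assumes an already-authorized PrefixesClient (see the earlier PeeringsClient example) and that the generated ServicePrefix model carries the usual Name field. Resource names are placeholders:

package examples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
)

// listServicePrefixes prints the name of every prefix registered under a
// peering service, crossing page boundaries via ListByPeeringServiceComplete.
func listServicePrefixes(ctx context.Context, client peering.PrefixesClient, resourceGroup, serviceName string) error {
	it, err := client.ListByPeeringServiceComplete(ctx, resourceGroup, serviceName)
	if err != nil {
		return err
	}
	for it.NotDone() {
		if prefix := it.Value(); prefix.Name != nil {
			fmt.Println(*prefix.Name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}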
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/servicelocations.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/servicelocations.go
new file mode 100644
index 000000000000..f20c06d98ee3
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/servicelocations.go
@@ -0,0 +1,151 @@
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ServiceLocationsClient is the peering Client
+type ServiceLocationsClient struct {
+ BaseClient
+}
+
+// NewServiceLocationsClient creates an instance of the ServiceLocationsClient client.
+func NewServiceLocationsClient(subscriptionID string) ServiceLocationsClient {
+ return NewServiceLocationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewServiceLocationsClientWithBaseURI creates an instance of the ServiceLocationsClient client.
+func NewServiceLocationsClientWithBaseURI(baseURI string, subscriptionID string) ServiceLocationsClient {
+ return ServiceLocationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists all of the available peering service locations for the specified kind of peering.
+func (client ServiceLocationsClient) List(ctx context.Context) (result ServiceLocationListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServiceLocationsClient.List")
+ defer func() {
+ sc := -1
+ if result.sllr.Response.Response != nil {
+ sc = result.sllr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServiceLocationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.sllr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.ServiceLocationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.sllr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServiceLocationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ServiceLocationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/peeringServiceLocations", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServiceLocationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ServiceLocationsClient) ListResponder(resp *http.Response) (result ServiceLocationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client ServiceLocationsClient) listNextResults(ctx context.Context, lastResults ServiceLocationListResult) (result ServiceLocationListResult, err error) {
+ req, err := lastResults.serviceLocationListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "peering.ServiceLocationsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "peering.ServiceLocationsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServiceLocationsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ServiceLocationsClient) ListComplete(ctx context.Context) (result ServiceLocationListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServiceLocationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
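For variety, the sketch below walks the ServiceLocationsClient results page by page through the page type returned by List, rather than through the Complete iterator; it assumes an already-authorized client and the usual generated page methods (Values, NotDone, NextWithContext):

package examples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
)

// printServiceLocations walks the results page by page, which is useful when
// work needs to be batched per page rather than per element.
func printServiceLocations(ctx context.Context, client peering.ServiceLocationsClient) error {
	page, err := client.List(ctx)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, loc := range page.Values() {
			if loc.Name != nil {
				fmt.Println(*loc.Name)
			}
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}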
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/serviceprefixes.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/serviceprefixes.go
new file mode 100644
index 000000000000..edcabf337f8d
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/serviceprefixes.go
@@ -0,0 +1,280 @@
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ServicePrefixesClient is the peering Client
+type ServicePrefixesClient struct {
+ BaseClient
+}
+
+// NewServicePrefixesClient creates an instance of the ServicePrefixesClient client.
+func NewServicePrefixesClient(subscriptionID string) ServicePrefixesClient {
+ return NewServicePrefixesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewServicePrefixesClientWithBaseURI creates an instance of the ServicePrefixesClient client.
+func NewServicePrefixesClientWithBaseURI(baseURI string, subscriptionID string) ServicePrefixesClient {
+ return ServicePrefixesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates the peering prefix.
+// Parameters:
+// resourceGroupName - the resource group name.
+// peeringServiceName - the peering service name.
+// prefixName - the prefix name.
+// peeringServicePrefix - the IP prefix for a peering service.
+func (client ServicePrefixesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string, peeringServicePrefix ServicePrefix) (result ServicePrefix, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicePrefixesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, peeringServiceName, prefixName, peeringServicePrefix)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicePrefixesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.ServicePrefixesClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicePrefixesClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ServicePrefixesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string, peeringServicePrefix ServicePrefix) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringServiceName": autorest.Encode("path", peeringServiceName),
+ "prefixName": autorest.Encode("path", prefixName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}", pathParameters),
+ autorest.WithJSON(peeringServicePrefix),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicePrefixesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ServicePrefixesClient) CreateOrUpdateResponder(resp *http.Response) (result ServicePrefix, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete removes the peering prefix.
+// Parameters:
+// resourceGroupName - the resource group name.
+// peeringServiceName - the peering service name.
+// prefixName - the prefix name.
+func (client ServicePrefixesClient) Delete(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicePrefixesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, peeringServiceName, prefixName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicePrefixesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "peering.ServicePrefixesClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicePrefixesClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ServicePrefixesClient) DeletePreparer(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringServiceName": autorest.Encode("path", peeringServiceName),
+ "prefixName": autorest.Encode("path", prefixName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicePrefixesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ServicePrefixesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the peering service prefix.
+// Parameters:
+// resourceGroupName - the resource group name.
+// peeringServiceName - the peering service name.
+// prefixName - the prefix name.
+func (client ServicePrefixesClient) Get(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string) (result ServicePrefix, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicePrefixesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, peeringServiceName, prefixName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicePrefixesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.ServicePrefixesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicePrefixesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ServicePrefixesClient) GetPreparer(ctx context.Context, resourceGroupName string, peeringServiceName string, prefixName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringServiceName": autorest.Encode("path", peeringServiceName),
+ "prefixName": autorest.Encode("path", prefixName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicePrefixesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ServicePrefixesClient) GetResponder(resp *http.Response) (result ServicePrefix, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
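A sketch combining Get and Delete on the ServicePrefixesClient above; it assumes an already-authorized client, and the resource names passed in are placeholders. DeleteResponder treats both 200 and 204 as success, so a successful call simply carries the status code:

package examples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
)

// removePrefix looks up a prefix under a peering service and deletes it if it
// exists; errors from Get (including 404s surfaced by GetResponder) and from
// Delete are returned to the caller.
func removePrefix(ctx context.Context, client peering.ServicePrefixesClient, rg, service, prefix string) error {
	existing, err := client.Get(ctx, rg, service, prefix)
	if err != nil {
		return err
	}
	if existing.Name != nil {
		fmt.Println("deleting prefix:", *existing.Name)
	}

	resp, err := client.Delete(ctx, rg, service, prefix)
	if err != nil {
		return err
	}
	fmt.Println("delete returned HTTP", resp.StatusCode)
	return nil
}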
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/serviceproviders.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/serviceproviders.go
new file mode 100644
index 000000000000..0ab15633390b
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/serviceproviders.go
@@ -0,0 +1,151 @@
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ServiceProvidersClient is the peering Client
+type ServiceProvidersClient struct {
+ BaseClient
+}
+
+// NewServiceProvidersClient creates an instance of the ServiceProvidersClient client.
+func NewServiceProvidersClient(subscriptionID string) ServiceProvidersClient {
+ return NewServiceProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewServiceProvidersClientWithBaseURI creates an instance of the ServiceProvidersClient client.
+func NewServiceProvidersClientWithBaseURI(baseURI string, subscriptionID string) ServiceProvidersClient {
+ return ServiceProvidersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists all of the available peering service providers.
+func (client ServiceProvidersClient) List(ctx context.Context) (result ServiceProviderListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServiceProvidersClient.List")
+ defer func() {
+ sc := -1
+ if result.splr.Response.Response != nil {
+ sc = result.splr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServiceProvidersClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.splr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.ServiceProvidersClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.splr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServiceProvidersClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ServiceProvidersClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/peeringServiceProviders", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServiceProvidersClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ServiceProvidersClient) ListResponder(resp *http.Response) (result ServiceProviderListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client ServiceProvidersClient) listNextResults(ctx context.Context, lastResults ServiceProviderListResult) (result ServiceProviderListResult, err error) {
+ req, err := lastResults.serviceProviderListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "peering.ServiceProvidersClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "peering.ServiceProvidersClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServiceProvidersClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ServiceProvidersClient) ListComplete(ctx context.Context) (result ServiceProviderListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServiceProvidersClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
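Finally, a sketch that collects provider names through ListComplete on the ServiceProvidersClient above, again assuming an already-authorized client and the usual Name field on the generated ServiceProvider model:

package examples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
)

// serviceProviderNames gathers the resource names of all peering service
// providers visible to the subscription into a slice.
func serviceProviderNames(ctx context.Context, client peering.ServiceProvidersClient) ([]string, error) {
	var names []string
	it, err := client.ListComplete(ctx)
	if err != nil {
		return nil, err
	}
	for it.NotDone() {
		if n := it.Value().Name; n != nil {
			names = append(names, *n)
		}
		if err := it.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return names, nil
}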
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/services.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/services.go
new file mode 100644
index 000000000000..65c4f1337d87
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/services.go
@@ -0,0 +1,585 @@
+package peering
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ServicesClient is the peering Client
+type ServicesClient struct {
+ BaseClient
+}
+
+// NewServicesClient creates an instance of the ServicesClient client.
+func NewServicesClient(subscriptionID string) ServicesClient {
+ return NewServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewServicesClientWithBaseURI creates an instance of the ServicesClient client.
+func NewServicesClientWithBaseURI(baseURI string, subscriptionID string) ServicesClient {
+ return ServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a new peering service or updates an existing peering service with the specified name under
+// the given subscription and resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// peeringServiceName - the name of the peering service.
+// peeringService - the properties needed to create or update a peering service.
+func (client ServicesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, peeringServiceName string, peeringService Service) (result Service, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: peeringService,
+ Constraints: []validation.Constraint{{Target: "peeringService.Location", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("peering.ServicesClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, peeringServiceName, peeringService)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ServicesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, peeringServiceName string, peeringService Service) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringServiceName": autorest.Encode("path", peeringServiceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}", pathParameters),
+ autorest.WithJSON(peeringService),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ServicesClient) CreateOrUpdateResponder(resp *http.Response) (result Service, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes an existing peering service with the specified name under the given subscription and resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// peeringServiceName - the name of the peering service.
+func (client ServicesClient) Delete(ctx context.Context, resourceGroupName string, peeringServiceName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, peeringServiceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ServicesClient) DeletePreparer(ctx context.Context, resourceGroupName string, peeringServiceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringServiceName": autorest.Encode("path", peeringServiceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets an existing peering service with the specified name under the given subscription and resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// peeringServiceName - the name of the peering service.
+func (client ServicesClient) Get(ctx context.Context, resourceGroupName string, peeringServiceName string) (result Service, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, peeringServiceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ServicesClient) GetPreparer(ctx context.Context, resourceGroupName string, peeringServiceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringServiceName": autorest.Encode("path", peeringServiceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ServicesClient) GetResponder(resp *http.Response) (result Service, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByResourceGroup lists all of the peering services under the given subscription and resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+func (client ServicesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ServiceListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.slr.Response.Response != nil {
+ sc = result.slr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByResourceGroupNextResults
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.slr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.slr, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client ServicesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client ServicesClient) ListByResourceGroupResponder(resp *http.Response) (result ServiceListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client ServicesClient) listByResourceGroupNextResults(ctx context.Context, lastResults ServiceListResult) (result ServiceListResult, err error) {
+ req, err := lastResults.serviceListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "peering.ServicesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "peering.ServicesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ServicesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ServiceListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
+ return
+}
+
+// ListBySubscription lists all of the peering services under the given subscription.
+func (client ServicesClient) ListBySubscription(ctx context.Context) (result ServiceListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.slr.Response.Response != nil {
+ sc = result.slr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listBySubscriptionNextResults
+ req, err := client.ListBySubscriptionPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "ListBySubscription", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.slr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "ListBySubscription", resp, "Failure sending request")
+ return
+ }
+
+ result.slr, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "ListBySubscription", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListBySubscriptionPreparer prepares the ListBySubscription request.
+func (client ServicesClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Peering/peeringServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
+// closes the http.Response Body.
+func (client ServicesClient) ListBySubscriptionResponder(resp *http.Response) (result ServiceListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listBySubscriptionNextResults retrieves the next set of results, if any.
+func (client ServicesClient) listBySubscriptionNextResults(ctx context.Context, lastResults ServiceListResult) (result ServiceListResult, err error) {
+ req, err := lastResults.serviceListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "peering.ServicesClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "peering.ServicesClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ServicesClient) ListBySubscriptionComplete(ctx context.Context) (result ServiceListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListBySubscription(ctx)
+ return
+}
+
+// Update updates tags for a peering service with the specified name under the given subscription and resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// peeringServiceName - the name of the peering service.
+// tags - the resource tags.
+func (client ServicesClient) Update(ctx context.Context, resourceGroupName string, peeringServiceName string, tags ResourceTags) (result Service, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, peeringServiceName, tags)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "peering.ServicesClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client ServicesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, peeringServiceName string, tags ResourceTags) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "peeringServiceName": autorest.Encode("path", peeringServiceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}", pathParameters),
+ autorest.WithJSON(tags),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicesClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client ServicesClient) UpdateResponder(resp *http.Response) (result Service, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
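
For reference, a minimal usage sketch of the new `peering.ServicesClient`. The `NewServicesClient` constructor and the environment-based authorizer from `go-autorest/autorest/azure/auth` are assumptions (they follow the standard generator/autorest pattern rather than appearing in this hunk), and the resource names are placeholders; the iterator follows the same `NotDone`/`NextWithContext` pattern as the other generated paginated results.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// NewServicesClient and the environment-based authorizer are assumptions here;
	// substitute your own subscription ID and credential setup.
	client := peering.NewServicesClient("<subscription-id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()

	// Fetch one peering service by resource group and name.
	svc, err := client.Get(ctx, "my-resource-group", "my-peering-service")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("GET returned HTTP", svc.Response.Response.StatusCode)

	// Enumerate every peering service in the resource group; the *Complete
	// variant crosses page boundaries automatically.
	iter, err := client.ListByResourceGroupComplete(ctx, "my-resource-group")
	if err != nil {
		log.Fatal(err)
	}
	count := 0
	for iter.NotDone() {
		count++
		if err := iter.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Printf("found %d peering service(s)\n", count)
}
```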
diff --git a/services/preview/peering/mgmt/2019-08-01-preview/peering/version.go b/services/preview/peering/mgmt/2019-08-01-preview/peering/version.go
new file mode 100644
index 000000000000..23c3bf5cc149
--- /dev/null
+++ b/services/preview/peering/mgmt/2019-08-01-preview/peering/version.go
@@ -0,0 +1,30 @@
+package peering
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " peering/2019-08-01-preview"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
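
The version helpers above are small but handy when wiring up telemetry or logging; a quick sketch of what they return (the exact version number depends on the SDK release you build against):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/peering/mgmt/2019-08-01-preview/peering"
)

func main() {
	// Prints something like "Azure-SDK-For-Go/vNN.N.N peering/2019-08-01-preview",
	// depending on the compiled-in SDK version.
	fmt.Println(peering.UserAgent())
	fmt.Println(peering.Version())
}
```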
diff --git a/services/preview/portal/mgmt/2018-10-01-preview/portal/client.go b/services/preview/portal/mgmt/2018-10-01-preview/portal/client.go
new file mode 100644
index 000000000000..25bb87b461d4
--- /dev/null
+++ b/services/preview/portal/mgmt/2018-10-01-preview/portal/client.go
@@ -0,0 +1,51 @@
+// Package portal implements the Azure ARM Portal service API version 2018-10-01-preview.
+//
+// Allows creation and deletion of Azure Shared Dashboards.
+package portal
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Portal
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Portal.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
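
A short sketch of constructing the base client, either against the default public-cloud endpoint or an alternative ARM endpoint. The sovereign-cloud URL and the environment-based authorizer are illustrative assumptions; the operation-group clients (such as `DashboardsClient` below) embed this `BaseClient`, so the same setup applies to them.

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/portal/mgmt/2018-10-01-preview/portal"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Default public-cloud endpoint.
	client := portal.New("<subscription-id>")

	// Or target a different ARM endpoint, e.g. a sovereign cloud (URL is illustrative).
	govClient := portal.NewWithBaseURI("https://management.usgovcloudapi.net", "<subscription-id>")

	// Both clients still need credentials before use; the environment-based
	// authorizer from go-autorest is one common option (assumed here).
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer
	govClient.Authorizer = authorizer

	fmt.Println(client.BaseURI, govClient.BaseURI)
}
```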
diff --git a/services/preview/portal/mgmt/2018-10-01-preview/portal/dashboards.go b/services/preview/portal/mgmt/2018-10-01-preview/portal/dashboards.go
new file mode 100644
index 000000000000..d69078e58e88
--- /dev/null
+++ b/services/preview/portal/mgmt/2018-10-01-preview/portal/dashboards.go
@@ -0,0 +1,611 @@
+package portal
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// DashboardsClient is the client that allows creation and deletion of Azure Shared Dashboards.
+type DashboardsClient struct {
+ BaseClient
+}
+
+// NewDashboardsClient creates an instance of the DashboardsClient client.
+func NewDashboardsClient(subscriptionID string) DashboardsClient {
+ return NewDashboardsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewDashboardsClientWithBaseURI creates an instance of the DashboardsClient client.
+func NewDashboardsClientWithBaseURI(baseURI string, subscriptionID string) DashboardsClient {
+ return DashboardsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a Dashboard.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// dashboardName - the name of the dashboard.
+// dashboard - the parameters required to create or update a dashboard.
+func (client DashboardsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, dashboardName string, dashboard Dashboard) (result Dashboard, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DashboardsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: dashboardName,
+ Constraints: []validation.Constraint{{Target: "dashboardName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "dashboardName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: dashboard,
+ Constraints: []validation.Constraint{{Target: "dashboard.Location", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("portal.DashboardsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, dashboardName, dashboard)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client DashboardsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, dashboardName string, dashboard Dashboard) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "dashboardName": autorest.Encode("path", dashboardName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2018-10-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ dashboard.ID = nil
+ dashboard.Name = nil
+ dashboard.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Portal/dashboards/{dashboardName}", pathParameters),
+ autorest.WithJSON(dashboard),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client DashboardsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client DashboardsClient) CreateOrUpdateResponder(resp *http.Response) (result Dashboard, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes the Dashboard.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// dashboardName - the name of the dashboard.
+func (client DashboardsClient) Delete(ctx context.Context, resourceGroupName string, dashboardName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DashboardsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: dashboardName,
+ Constraints: []validation.Constraint{{Target: "dashboardName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "dashboardName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("portal.DashboardsClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, dashboardName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client DashboardsClient) DeletePreparer(ctx context.Context, resourceGroupName string, dashboardName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "dashboardName": autorest.Encode("path", dashboardName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2018-10-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Portal/dashboards/{dashboardName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client DashboardsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client DashboardsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the Dashboard.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// dashboardName - the name of the dashboard.
+func (client DashboardsClient) Get(ctx context.Context, resourceGroupName string, dashboardName string) (result Dashboard, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DashboardsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: dashboardName,
+ Constraints: []validation.Constraint{{Target: "dashboardName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "dashboardName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("portal.DashboardsClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, dashboardName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client DashboardsClient) GetPreparer(ctx context.Context, resourceGroupName string, dashboardName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "dashboardName": autorest.Encode("path", dashboardName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2018-10-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Portal/dashboards/{dashboardName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client DashboardsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client DashboardsClient) GetResponder(resp *http.Response) (result Dashboard, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByResourceGroup gets all the Dashboards within a resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+func (client DashboardsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DashboardListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DashboardsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.dlr.Response.Response != nil {
+ sc = result.dlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByResourceGroupNextResults
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.dlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.dlr, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client DashboardsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2018-10-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Portal/dashboards", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client DashboardsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client DashboardsClient) ListByResourceGroupResponder(resp *http.Response) (result DashboardListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByResourceGroupNextResults retrieves the next set of results, if any.
+func (client DashboardsClient) listByResourceGroupNextResults(ctx context.Context, lastResults DashboardListResult) (result DashboardListResult, err error) {
+ req, err := lastResults.dashboardListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "portal.DashboardsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "portal.DashboardsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client DashboardsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result DashboardListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DashboardsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
+ return
+}
+
+// ListBySubscription gets all the dashboards within a subscription.
+func (client DashboardsClient) ListBySubscription(ctx context.Context) (result DashboardListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DashboardsClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.dlr.Response.Response != nil {
+ sc = result.dlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listBySubscriptionNextResults
+ req, err := client.ListBySubscriptionPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "ListBySubscription", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.dlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "ListBySubscription", resp, "Failure sending request")
+ return
+ }
+
+ result.dlr, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "ListBySubscription", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListBySubscriptionPreparer prepares the ListBySubscription request.
+func (client DashboardsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2018-10-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Portal/dashboards", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
+// http.Response Body if it receives an error.
+func (client DashboardsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
+// closes the http.Response Body.
+func (client DashboardsClient) ListBySubscriptionResponder(resp *http.Response) (result DashboardListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listBySubscriptionNextResults retrieves the next set of results, if any.
+func (client DashboardsClient) listBySubscriptionNextResults(ctx context.Context, lastResults DashboardListResult) (result DashboardListResult, err error) {
+ req, err := lastResults.dashboardListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "portal.DashboardsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "portal.DashboardsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
+func (client DashboardsClient) ListBySubscriptionComplete(ctx context.Context) (result DashboardListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DashboardsClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListBySubscription(ctx)
+ return
+}
+
+// Update updates an existing Dashboard.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// dashboardName - the name of the dashboard.
+// dashboard - the updatable fields of a Dashboard.
+func (client DashboardsClient) Update(ctx context.Context, resourceGroupName string, dashboardName string, dashboard PatchableDashboard) (result Dashboard, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DashboardsClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: dashboardName,
+ Constraints: []validation.Constraint{{Target: "dashboardName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+ {Target: "dashboardName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("portal.DashboardsClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, dashboardName, dashboard)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.DashboardsClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client DashboardsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, dashboardName string, dashboard PatchableDashboard) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "dashboardName": autorest.Encode("path", dashboardName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2018-10-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Portal/dashboards/{dashboardName}", pathParameters),
+ autorest.WithJSON(dashboard),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client DashboardsClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client DashboardsClient) UpdateResponder(resp *http.Response) (result Dashboard, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
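
Putting the dashboards operations together, a hedged end-to-end sketch: list every dashboard in the subscription, then delete one by name. The credential setup via the go-autorest environment authorizer is an assumption, and the resource names are placeholders; note the client-side validation above requires dashboard names of 3 to 64 characters.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/portal/mgmt/2018-10-01-preview/portal"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	client := portal.NewDashboardsClient("<subscription-id>")
	// The environment-based authorizer is one option; any autorest.Authorizer works.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()

	// List every dashboard in the subscription, crossing pages automatically.
	iter, err := client.ListBySubscriptionComplete(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for iter.NotDone() {
		d := iter.Value()
		if d.Name != nil {
			fmt.Println("dashboard:", *d.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}

	// Delete one dashboard by resource group and name.
	if _, err := client.Delete(ctx, "my-resource-group", "my-dashboard"); err != nil {
		log.Fatal(err)
	}
}
```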
diff --git a/services/preview/portal/mgmt/2018-10-01-preview/portal/models.go b/services/preview/portal/mgmt/2018-10-01-preview/portal/models.go
new file mode 100644
index 000000000000..6d9873a9d821
--- /dev/null
+++ b/services/preview/portal/mgmt/2018-10-01-preview/portal/models.go
@@ -0,0 +1,615 @@
+package portal
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/portal/mgmt/2018-10-01-preview/portal"
+
+// Dashboard the shared dashboard resource definition.
+type Dashboard struct {
+ autorest.Response `json:"-"`
+ // DashboardProperties - The shared dashboard properties.
+ *DashboardProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Location - Resource location
+ Location *string `json:"location,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for Dashboard.
+func (d Dashboard) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if d.DashboardProperties != nil {
+ objectMap["properties"] = d.DashboardProperties
+ }
+ if d.Location != nil {
+ objectMap["location"] = d.Location
+ }
+ if d.Tags != nil {
+ objectMap["tags"] = d.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Dashboard struct.
+func (d *Dashboard) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var dashboardProperties DashboardProperties
+ err = json.Unmarshal(*v, &dashboardProperties)
+ if err != nil {
+ return err
+ }
+ d.DashboardProperties = &dashboardProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ d.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ d.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ d.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ d.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ d.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// DashboardLens a dashboard lens.
+type DashboardLens struct {
+ // Order - The lens order.
+ Order *int32 `json:"order,omitempty"`
+ // Parts - The dashboard parts.
+ Parts map[string]*DashboardParts `json:"parts"`
+	// Metadata - The dashboard lens's metadata.
+ Metadata map[string]interface{} `json:"metadata"`
+}
+
+// MarshalJSON is the custom marshaler for DashboardLens.
+func (dl DashboardLens) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if dl.Order != nil {
+ objectMap["order"] = dl.Order
+ }
+ if dl.Parts != nil {
+ objectMap["parts"] = dl.Parts
+ }
+ if dl.Metadata != nil {
+ objectMap["metadata"] = dl.Metadata
+ }
+ return json.Marshal(objectMap)
+}
+
+// DashboardListResult list of dashboards.
+type DashboardListResult struct {
+ autorest.Response `json:"-"`
+	// Value - The array of dashboards.
+ Value *[]Dashboard `json:"value,omitempty"`
+ // NextLink - The URL to use for getting the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// DashboardListResultIterator provides access to a complete listing of Dashboard values.
+type DashboardListResultIterator struct {
+ i int
+ page DashboardListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *DashboardListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DashboardListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *DashboardListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter DashboardListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter DashboardListResultIterator) Response() DashboardListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter DashboardListResultIterator) Value() Dashboard {
+ if !iter.page.NotDone() {
+ return Dashboard{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the DashboardListResultIterator type.
+func NewDashboardListResultIterator(page DashboardListResultPage) DashboardListResultIterator {
+ return DashboardListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (dlr DashboardListResult) IsEmpty() bool {
+ return dlr.Value == nil || len(*dlr.Value) == 0
+}
+
+// dashboardListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (dlr DashboardListResult) dashboardListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if dlr.NextLink == nil || len(to.String(dlr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(dlr.NextLink)))
+}
+
+// DashboardListResultPage contains a page of Dashboard values.
+type DashboardListResultPage struct {
+ fn func(context.Context, DashboardListResult) (DashboardListResult, error)
+ dlr DashboardListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *DashboardListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DashboardListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.dlr)
+ if err != nil {
+ return err
+ }
+ page.dlr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *DashboardListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page DashboardListResultPage) NotDone() bool {
+ return !page.dlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page DashboardListResultPage) Response() DashboardListResult {
+ return page.dlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page DashboardListResultPage) Values() []Dashboard {
+ if page.dlr.IsEmpty() {
+ return nil
+ }
+ return *page.dlr.Value
+}
+
+// Creates a new instance of the DashboardListResultPage type.
+func NewDashboardListResultPage(getNextPage func(context.Context, DashboardListResult) (DashboardListResult, error)) DashboardListResultPage {
+ return DashboardListResultPage{fn: getNextPage}
+}
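
If you prefer to control paging yourself instead of relying on the `*Complete` iterator helpers, the page type can be driven directly. A minimal sketch, assuming a `DashboardsClient` that has already been configured with credentials:

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/portal/mgmt/2018-10-01-preview/portal"
)

// printDashboardNames walks DashboardListResultPage manually, one page at a time.
func printDashboardNames(ctx context.Context, client portal.DashboardsClient) error {
	page, err := client.ListBySubscription(ctx)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, d := range page.Values() {
			if d.Name != nil {
				fmt.Println(*d.Name)
			}
		}
		// NextWithContext fetches the next page via the stored nextLink;
		// the loop ends once an empty page is returned.
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```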
+
+// DashboardParts a dashboard part.
+type DashboardParts struct {
+ // Position - The dashboard's part position.
+ Position *DashboardPartsPosition `json:"position,omitempty"`
+ // Metadata - The dashboard part's metadata.
+ Metadata map[string]interface{} `json:"metadata"`
+}
+
+// MarshalJSON is the custom marshaler for DashboardParts.
+func (dp DashboardParts) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if dp.Position != nil {
+ objectMap["position"] = dp.Position
+ }
+ if dp.Metadata != nil {
+ objectMap["metadata"] = dp.Metadata
+ }
+ return json.Marshal(objectMap)
+}
+
+// DashboardPartsPosition the dashboard's part position.
+type DashboardPartsPosition struct {
+ // X - The dashboard's part x coordinate.
+ X *float64 `json:"x,omitempty"`
+ // Y - The dashboard's part y coordinate.
+ Y *float64 `json:"y,omitempty"`
+ // RowSpan - The dashboard's part row span.
+ RowSpan *float64 `json:"rowSpan,omitempty"`
+ // ColSpan - The dashboard's part column span.
+ ColSpan *float64 `json:"colSpan,omitempty"`
+ // Metadata - The dashboard part's metadata.
+ Metadata map[string]interface{} `json:"metadata"`
+}
+
+// MarshalJSON is the custom marshaler for DashboardPartsPosition.
+func (dp DashboardPartsPosition) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if dp.X != nil {
+ objectMap["x"] = dp.X
+ }
+ if dp.Y != nil {
+ objectMap["y"] = dp.Y
+ }
+ if dp.RowSpan != nil {
+ objectMap["rowSpan"] = dp.RowSpan
+ }
+ if dp.ColSpan != nil {
+ objectMap["colSpan"] = dp.ColSpan
+ }
+ if dp.Metadata != nil {
+ objectMap["metadata"] = dp.Metadata
+ }
+ return json.Marshal(objectMap)
+}
+
+// DashboardProperties the shared dashboard properties.
+type DashboardProperties struct {
+ // Lenses - The dashboard lenses.
+ Lenses map[string]*DashboardLens `json:"lenses"`
+ // Metadata - The dashboard metadata.
+ Metadata map[string]interface{} `json:"metadata"`
+}
+
+// MarshalJSON is the custom marshaler for DashboardProperties.
+func (dp DashboardProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if dp.Lenses != nil {
+ objectMap["lenses"] = dp.Lenses
+ }
+ if dp.Metadata != nil {
+ objectMap["metadata"] = dp.Metadata
+ }
+ return json.Marshal(objectMap)
+}
+
+// ErrorDefinition error definition.
+type ErrorDefinition struct {
+ // Code - READ-ONLY; Service specific error code which serves as the substatus for the HTTP error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; Description of the error.
+ Message *string `json:"message,omitempty"`
+ // Details - READ-ONLY; Internal error details.
+ Details *[]ErrorDefinition `json:"details,omitempty"`
+}
+
+// ErrorResponse error response.
+type ErrorResponse struct {
+ // Error - The error details.
+ Error *ErrorDefinition `json:"error,omitempty"`
+}
+
+// PatchableDashboard the shared dashboard resource definition.
+type PatchableDashboard struct {
+ // DashboardProperties - The shared dashboard properties.
+ *DashboardProperties `json:"properties,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for PatchableDashboard.
+func (pd PatchableDashboard) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if pd.DashboardProperties != nil {
+ objectMap["properties"] = pd.DashboardProperties
+ }
+ if pd.Tags != nil {
+ objectMap["tags"] = pd.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for PatchableDashboard struct.
+func (pd *PatchableDashboard) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var dashboardProperties DashboardProperties
+ err = json.Unmarshal(*v, &dashboardProperties)
+ if err != nil {
+ return err
+ }
+ pd.DashboardProperties = &dashboardProperties
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ pd.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
+// ResourceProviderOperation supported operations of this resource provider.
+type ResourceProviderOperation struct {
+ // Name - Operation name, in format of {provider}/{resource}/{operation}
+ Name *string `json:"name,omitempty"`
+ // IsDataAction - Indicates whether the operation applies to data-plane.
+ IsDataAction *string `json:"isDataAction,omitempty"`
+ // Display - Display metadata associated with the operation.
+ Display *ResourceProviderOperationDisplay `json:"display,omitempty"`
+}
+
+// ResourceProviderOperationDisplay display metadata associated with the operation.
+type ResourceProviderOperationDisplay struct {
+ // Provider - Resource provider: Microsoft Custom Providers.
+ Provider *string `json:"provider,omitempty"`
+ // Resource - Resource on which the operation is performed.
+ Resource *string `json:"resource,omitempty"`
+ // Operation - Type of operation: get, read, delete, etc.
+ Operation *string `json:"operation,omitempty"`
+ // Description - Description of this operation.
+ Description *string `json:"description,omitempty"`
+}
+
+// ResourceProviderOperationList results of the request to list operations.
+type ResourceProviderOperationList struct {
+ autorest.Response `json:"-"`
+ // Value - List of operations supported by this resource provider.
+ Value *[]ResourceProviderOperation `json:"value,omitempty"`
+ // NextLink - The URL to use for getting the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ResourceProviderOperationListIterator provides access to a complete listing of ResourceProviderOperation
+// values.
+type ResourceProviderOperationListIterator struct {
+ i int
+ page ResourceProviderOperationListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ResourceProviderOperationListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ResourceProviderOperationListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ResourceProviderOperationListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ResourceProviderOperationListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ResourceProviderOperationListIterator) Response() ResourceProviderOperationList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ResourceProviderOperationListIterator) Value() ResourceProviderOperation {
+ if !iter.page.NotDone() {
+ return ResourceProviderOperation{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the ResourceProviderOperationListIterator type.
+func NewResourceProviderOperationListIterator(page ResourceProviderOperationListPage) ResourceProviderOperationListIterator {
+ return ResourceProviderOperationListIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (rpol ResourceProviderOperationList) IsEmpty() bool {
+ return rpol.Value == nil || len(*rpol.Value) == 0
+}
+
+// resourceProviderOperationListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (rpol ResourceProviderOperationList) resourceProviderOperationListPreparer(ctx context.Context) (*http.Request, error) {
+ if rpol.NextLink == nil || len(to.String(rpol.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(rpol.NextLink)))
+}
+
+// ResourceProviderOperationListPage contains a page of ResourceProviderOperation values.
+type ResourceProviderOperationListPage struct {
+ fn func(context.Context, ResourceProviderOperationList) (ResourceProviderOperationList, error)
+ rpol ResourceProviderOperationList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ResourceProviderOperationListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ResourceProviderOperationListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.rpol)
+ if err != nil {
+ return err
+ }
+ page.rpol = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ResourceProviderOperationListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ResourceProviderOperationListPage) NotDone() bool {
+ return !page.rpol.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ResourceProviderOperationListPage) Response() ResourceProviderOperationList {
+ return page.rpol
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ResourceProviderOperationListPage) Values() []ResourceProviderOperation {
+ if page.rpol.IsEmpty() {
+ return nil
+ }
+ return *page.rpol.Value
+}
+
+// Creates a new instance of the ResourceProviderOperationListPage type.
+func NewResourceProviderOperationListPage(getNextPage func(context.Context, ResourceProviderOperationList) (ResourceProviderOperationList, error)) ResourceProviderOperationListPage {
+ return ResourceProviderOperationListPage{fn: getNextPage}
+}
diff --git a/services/preview/portal/mgmt/2018-10-01-preview/portal/operations.go b/services/preview/portal/mgmt/2018-10-01-preview/portal/operations.go
new file mode 100644
index 000000000000..cd80a0013eec
--- /dev/null
+++ b/services/preview/portal/mgmt/2018-10-01-preview/portal/operations.go
@@ -0,0 +1,147 @@
+package portal
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// OperationsClient is the client that allows creation and deletion of Azure Shared Dashboards.
+type OperationsClient struct {
+ BaseClient
+}
+
+// NewOperationsClient creates an instance of the OperationsClient client.
+func NewOperationsClient(subscriptionID string) OperationsClient {
+ return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+ return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists the operations available from the Microsoft Portal operations API.
+func (client OperationsClient) List(ctx context.Context) (result ResourceProviderOperationListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.rpol.Response.Response != nil {
+ sc = result.rpol.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.OperationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.rpol.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "portal.OperationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.rpol, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.OperationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ const APIVersion = "2018-10-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Portal/operations"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client OperationsClient) ListResponder(resp *http.Response) (result ResourceProviderOperationList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client OperationsClient) listNextResults(ctx context.Context, lastResults ResourceProviderOperationList) (result ResourceProviderOperationList, err error) {
+ req, err := lastResults.resourceProviderOperationListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "portal.OperationsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "portal.OperationsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "portal.OperationsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client OperationsClient) ListComplete(ctx context.Context) (result ResourceProviderOperationListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
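+
+// listOperationNames is an illustrative usage sketch, not part of the
+// generated API: ListComplete returns an iterator that crosses page boundaries
+// automatically, so callers only need Value, NotDone and NextWithContext.
+// Error handling is deliberately minimal.
+func listOperationNames(ctx context.Context, client OperationsClient) ([]string, error) {
+	iter, err := client.ListComplete(ctx)
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for iter.NotDone() {
+		if op := iter.Value(); op.Name != nil {
+			names = append(names, *op.Name)
+		}
+		if err := iter.NextWithContext(ctx); err != nil {
+			return names, err
+		}
+	}
+	return names, nil
+}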
diff --git a/services/preview/portal/mgmt/2018-10-01-preview/portal/portalapi/interfaces.go b/services/preview/portal/mgmt/2018-10-01-preview/portal/portalapi/interfaces.go
new file mode 100644
index 000000000000..41bb316b2a9e
--- /dev/null
+++ b/services/preview/portal/mgmt/2018-10-01-preview/portal/portalapi/interfaces.go
@@ -0,0 +1,43 @@
+package portalapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/preview/portal/mgmt/2018-10-01-preview/portal"
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// OperationsClientAPI contains the set of methods on the OperationsClient type.
+type OperationsClientAPI interface {
+ List(ctx context.Context) (result portal.ResourceProviderOperationListPage, err error)
+}
+
+var _ OperationsClientAPI = (*portal.OperationsClient)(nil)
+
+// DashboardsClientAPI contains the set of methods on the DashboardsClient type.
+type DashboardsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, dashboardName string, dashboard portal.Dashboard) (result portal.Dashboard, err error)
+ Delete(ctx context.Context, resourceGroupName string, dashboardName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, dashboardName string) (result portal.Dashboard, err error)
+ ListByResourceGroup(ctx context.Context, resourceGroupName string) (result portal.DashboardListResultPage, err error)
+ ListBySubscription(ctx context.Context) (result portal.DashboardListResultPage, err error)
+ Update(ctx context.Context, resourceGroupName string, dashboardName string, dashboard portal.PatchableDashboard) (result portal.Dashboard, err error)
+}
+
+var _ DashboardsClientAPI = (*portal.DashboardsClient)(nil)
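+
+// tagDashboard is an illustrative usage sketch, not part of the generated API:
+// it programs against DashboardsClientAPI rather than the concrete
+// portal.DashboardsClient, so tests can inject a fake, and uses
+// portal.PatchableDashboard for a tags-only update. The "environment" tag key
+// is a placeholder.
+func tagDashboard(ctx context.Context, api DashboardsClientAPI, resourceGroup, dashboardName, env string) (portal.Dashboard, error) {
+	patch := portal.PatchableDashboard{
+		Tags: map[string]*string{"environment": &env},
+	}
+	return api.Update(ctx, resourceGroup, dashboardName, patch)
+}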
diff --git a/services/preview/portal/mgmt/2018-10-01-preview/portal/version.go b/services/preview/portal/mgmt/2018-10-01-preview/portal/version.go
new file mode 100644
index 000000000000..adbf6617b0e6
--- /dev/null
+++ b/services/preview/portal/mgmt/2018-10-01-preview/portal/version.go
@@ -0,0 +1,30 @@
+package portal
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " portal/2018-10-01-preview"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
diff --git a/services/preview/reservations/mgmt/2019-04-01/reservations/models.go b/services/preview/reservations/mgmt/2019-04-01/reservations/models.go
index a3bf29b0f7fe..6cc43ecb16d1 100644
--- a/services/preview/reservations/mgmt/2019-04-01/reservations/models.go
+++ b/services/preview/reservations/mgmt/2019-04-01/reservations/models.go
@@ -1113,6 +1113,12 @@ type PatchPropertiesRenewProperties struct {
// Properties ...
type Properties struct {
+ autorest.Response `json:"-"`
+ Properties *SubscriptionScopeProperties `json:"properties,omitempty"`
+}
+
+// PropertiesType ...
+type PropertiesType struct {
// ReservedResourceType - Possible values include: 'VirtualMachines', 'SQLDatabases', 'SuseLinux', 'CosmosDb', 'RedHat', 'SQLDataWarehouse', 'VMwareCloudSimple', 'RedHatOsa'
ReservedResourceType ReservedResourceType `json:"reservedResourceType,omitempty"`
// InstanceFlexibility - Possible values include: 'On', 'Off'
@@ -1143,6 +1149,8 @@ type Properties struct {
// RenewDestination - Reservation Id of the reservation which is purchased because of renew. Format of the resource Id is /providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId}.
RenewDestination *string `json:"renewDestination,omitempty"`
RenewProperties *RenewPropertiesResponse `json:"renewProperties,omitempty"`
+ // Term - Possible values include: 'P1Y', 'P3Y'
+ Term ReservationTerm `json:"term,omitempty"`
}
// PurchaseRequest ...
@@ -1258,6 +1266,35 @@ type RenewPropertiesResponsePricingCurrencyTotal struct {
Amount *float64 `json:"amount,omitempty"`
}
+// ReservationAvailableScopesFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ReservationAvailableScopesFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ReservationAvailableScopesFuture) Result(client Client) (p Properties, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "reservations.ReservationAvailableScopesFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("reservations.ReservationAvailableScopesFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if p.Response.Response, err = future.GetResult(sender); err == nil && p.Response.Response.StatusCode != http.StatusNoContent {
+ p, err = client.AvailableScopesResponder(p.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "reservations.ReservationAvailableScopesFuture", "Result", p.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
// ReservationMergeFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type ReservationMergeFuture struct {
@@ -1325,13 +1362,19 @@ type Response struct {
// ID - READ-ONLY; Identifier of the reservation
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Name of the reservation
- Name *string `json:"name,omitempty"`
- Sku *SkuName `json:"sku,omitempty"`
- Properties *Properties `json:"properties,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Sku *SkuName `json:"sku,omitempty"`
+ Properties *PropertiesType `json:"properties,omitempty"`
// Type - READ-ONLY; Type of resource. "Microsoft.Capacity/reservationOrders/reservations"
Type *string `json:"type,omitempty"`
}
+// ScopeProperties ...
+type ScopeProperties struct {
+ Scope *string `json:"scope,omitempty"`
+ Valid *bool `json:"valid,omitempty"`
+}
+
// SkuName ...
type SkuName struct {
Name *string `json:"name,omitempty"`
@@ -1436,3 +1479,8 @@ func (sr *SplitRequest) UnmarshalJSON(body []byte) error {
return nil
}
+
+// SubscriptionScopeProperties ...
+type SubscriptionScopeProperties struct {
+ Scopes *[]ScopeProperties `json:"scopes,omitempty"`
+}
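+
+// validScopes is an illustrative sketch, not part of the generated API: it
+// unwraps the Properties -> SubscriptionScopeProperties -> ScopeProperties
+// nesting returned by the AvailableScopes operation and collects the scopes
+// the service reports as valid.
+func validScopes(p Properties) []string {
+	var out []string
+	if p.Properties == nil || p.Properties.Scopes == nil {
+		return out
+	}
+	for _, s := range *p.Properties.Scopes {
+		if s.Scope != nil && s.Valid != nil && *s.Valid {
+			out = append(out, *s.Scope)
+		}
+	}
+	return out
+}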
diff --git a/services/preview/reservations/mgmt/2019-04-01/reservations/reservation.go b/services/preview/reservations/mgmt/2019-04-01/reservations/reservation.go
index 655cfd6f0c1a..fd55c8b35a17 100644
--- a/services/preview/reservations/mgmt/2019-04-01/reservations/reservation.go
+++ b/services/preview/reservations/mgmt/2019-04-01/reservations/reservation.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -40,6 +41,90 @@ func NewClientWithBaseURI(baseURI string) Client {
return Client{NewWithBaseURI(baseURI)}
}
+// AvailableScopes gets the available scopes for a `Reservation`.
+// Parameters:
+// reservationOrderID - order Id of the reservation
+// reservationID - id of the Reservation Item
+func (client Client) AvailableScopes(ctx context.Context, reservationOrderID string, reservationID string, body []string) (result ReservationAvailableScopesFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/Client.AvailableScopes")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: body,
+ Constraints: []validation.Constraint{{Target: "body", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("reservations.Client", "AvailableScopes", err.Error())
+ }
+
+ req, err := client.AvailableScopesPreparer(ctx, reservationOrderID, reservationID, body)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "reservations.Client", "AvailableScopes", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.AvailableScopesSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "reservations.Client", "AvailableScopes", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// AvailableScopesPreparer prepares the AvailableScopes request.
+func (client Client) AvailableScopesPreparer(ctx context.Context, reservationOrderID string, reservationID string, body []string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "reservationId": autorest.Encode("path", reservationID),
+ "reservationOrderId": autorest.Encode("path", reservationOrderID),
+ }
+
+ const APIVersion = "2019-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId}/availableScopes", pathParameters),
+ autorest.WithJSON(body),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// AvailableScopesSender sends the AvailableScopes request. The method will close the
+// http.Response Body if it receives an error.
+func (client Client) AvailableScopesSender(req *http.Request) (future ReservationAvailableScopesFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// AvailableScopesResponder handles the response to the AvailableScopes request. The method always
+// closes the http.Response Body.
+func (client Client) AvailableScopesResponder(resp *http.Response) (result Properties, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
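+
+// waitForAvailableScopes is an illustrative usage sketch, not part of the
+// generated API: AvailableScopes starts a long-running operation and returns a
+// future, so the caller blocks with WaitForCompletionRef (assumed to be
+// available from the embedded azure.Future) and then reads the outcome with
+// Result. The scope strings in body are placeholders supplied by the caller.
+func waitForAvailableScopes(ctx context.Context, client Client, reservationOrderID, reservationID string, body []string) (Properties, error) {
+	future, err := client.AvailableScopes(ctx, reservationOrderID, reservationID, body)
+	if err != nil {
+		return Properties{}, err
+	}
+	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+		return Properties{}, err
+	}
+	return future.Result(client)
+}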
+
// Get get specific `Reservation` details.
// Parameters:
// reservationID - id of the Reservation Item
diff --git a/services/preview/reservations/mgmt/2019-04-01/reservations/reservationsapi/interfaces.go b/services/preview/reservations/mgmt/2019-04-01/reservations/reservationsapi/interfaces.go
index 8da92d58e3b0..70e8f000f936 100644
--- a/services/preview/reservations/mgmt/2019-04-01/reservations/reservationsapi/interfaces.go
+++ b/services/preview/reservations/mgmt/2019-04-01/reservations/reservationsapi/interfaces.go
@@ -30,18 +30,9 @@ type BaseClientAPI interface {
var _ BaseClientAPI = (*reservations.BaseClient)(nil)
-// OrderClientAPI contains the set of methods on the OrderClient type.
-type OrderClientAPI interface {
- Calculate(ctx context.Context, body reservations.PurchaseRequest) (result reservations.CalculatePriceResponse, err error)
- Get(ctx context.Context, reservationOrderID string) (result reservations.OrderResponse, err error)
- List(ctx context.Context) (result reservations.OrderListPage, err error)
- Purchase(ctx context.Context, reservationOrderID string, body reservations.PurchaseRequest) (result reservations.OrderPurchaseFuture, err error)
-}
-
-var _ OrderClientAPI = (*reservations.OrderClient)(nil)
-
// ClientAPI contains the set of methods on the Client type.
type ClientAPI interface {
+ AvailableScopes(ctx context.Context, reservationOrderID string, reservationID string, body []string) (result reservations.ReservationAvailableScopesFuture, err error)
Get(ctx context.Context, reservationID string, reservationOrderID string, expand string) (result reservations.Response, err error)
List(ctx context.Context, reservationOrderID string) (result reservations.ListPage, err error)
ListRevisions(ctx context.Context, reservationID string, reservationOrderID string) (result reservations.ListPage, err error)
@@ -52,6 +43,16 @@ type ClientAPI interface {
var _ ClientAPI = (*reservations.Client)(nil)
+// OrderClientAPI contains the set of methods on the OrderClient type.
+type OrderClientAPI interface {
+ Calculate(ctx context.Context, body reservations.PurchaseRequest) (result reservations.CalculatePriceResponse, err error)
+ Get(ctx context.Context, reservationOrderID string) (result reservations.OrderResponse, err error)
+ List(ctx context.Context) (result reservations.OrderListPage, err error)
+ Purchase(ctx context.Context, reservationOrderID string, body reservations.PurchaseRequest) (result reservations.OrderPurchaseFuture, err error)
+}
+
+var _ OrderClientAPI = (*reservations.OrderClient)(nil)
+
// OperationClientAPI contains the set of methods on the OperationClient type.
type OperationClientAPI interface {
List(ctx context.Context) (result reservations.OperationListPage, err error)
diff --git a/services/preview/security/mgmt/v2.0/security/iotsecuritysolution.go b/services/preview/security/mgmt/v2.0/security/iotsecuritysolution.go
new file mode 100644
index 000000000000..73b1112f75c2
--- /dev/null
+++ b/services/preview/security/mgmt/v2.0/security/iotsecuritysolution.go
@@ -0,0 +1,413 @@
+package security
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// IotSecuritySolutionClient is the API spec for Microsoft.Security (Azure Security Center) resource provider
+type IotSecuritySolutionClient struct {
+ BaseClient
+}
+
+// NewIotSecuritySolutionClient creates an instance of the IotSecuritySolutionClient client.
+func NewIotSecuritySolutionClient(subscriptionID string, ascLocation string) IotSecuritySolutionClient {
+ return NewIotSecuritySolutionClientWithBaseURI(DefaultBaseURI, subscriptionID, ascLocation)
+}
+
+// NewIotSecuritySolutionClientWithBaseURI creates an instance of the IotSecuritySolutionClient client.
+func NewIotSecuritySolutionClientWithBaseURI(baseURI string, subscriptionID string, ascLocation string) IotSecuritySolutionClient {
+ return IotSecuritySolutionClient{NewWithBaseURI(baseURI, subscriptionID, ascLocation)}
+}
+
+// Create creates a new solution manager.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// solutionName - the solution manager name
+// iotSecuritySolutionData - the security solution data
+func (client IotSecuritySolutionClient) Create(ctx context.Context, resourceGroupName string, solutionName string, iotSecuritySolutionData IoTSecuritySolutionModel) (result IoTSecuritySolutionModel, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IotSecuritySolutionClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: iotSecuritySolutionData,
+ Constraints: []validation.Constraint{{Target: "iotSecuritySolutionData.IoTSecuritySolutionProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "iotSecuritySolutionData.IoTSecuritySolutionProperties.Workspace", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "iotSecuritySolutionData.IoTSecuritySolutionProperties.DisplayName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "iotSecuritySolutionData.IoTSecuritySolutionProperties.IotHubs", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "iotSecuritySolutionData.IoTSecuritySolutionProperties.UserDefinedResources", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "iotSecuritySolutionData.IoTSecuritySolutionProperties.UserDefinedResources.Query", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "iotSecuritySolutionData.IoTSecuritySolutionProperties.UserDefinedResources.QuerySubscriptions", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("security.IotSecuritySolutionClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, solutionName, iotSecuritySolutionData)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Create", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client IotSecuritySolutionClient) CreatePreparer(ctx context.Context, resourceGroupName string, solutionName string, iotSecuritySolutionData IoTSecuritySolutionModel) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "solutionName": autorest.Encode("path", solutionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ iotSecuritySolutionData.ID = nil
+ iotSecuritySolutionData.Name = nil
+ iotSecuritySolutionData.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}", pathParameters),
+ autorest.WithJSON(iotSecuritySolutionData),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client IotSecuritySolutionClient) CreateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client IotSecuritySolutionClient) CreateResponder(resp *http.Response) (result IoTSecuritySolutionModel, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes the solution manager.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// solutionName - the solution manager name
+func (client IotSecuritySolutionClient) Delete(ctx context.Context, resourceGroupName string, solutionName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IotSecuritySolutionClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IotSecuritySolutionClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, solutionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client IotSecuritySolutionClient) DeletePreparer(ctx context.Context, resourceGroupName string, solutionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "solutionName": autorest.Encode("path", solutionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client IotSecuritySolutionClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client IotSecuritySolutionClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets the details of a specific IoT security solution.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// solutionName - the solution manager name
+func (client IotSecuritySolutionClient) Get(ctx context.Context, resourceGroupName string, solutionName string) (result IoTSecuritySolutionModel, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IotSecuritySolutionClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IotSecuritySolutionClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, solutionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client IotSecuritySolutionClient) GetPreparer(ctx context.Context, resourceGroupName string, solutionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "solutionName": autorest.Encode("path", solutionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client IotSecuritySolutionClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client IotSecuritySolutionClient) GetResponder(resp *http.Response) (result IoTSecuritySolutionModel, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
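+
+// getSolution is an illustrative usage sketch, not part of the generated API:
+// it constructs an IotSecuritySolutionClient, attaches a caller-supplied
+// autorest.Authorizer (credential acquisition is out of scope here), and
+// fetches one IoT security solution by resource group and name.
+func getSolution(ctx context.Context, subscriptionID, ascLocation string, authorizer autorest.Authorizer, resourceGroup, solutionName string) (IoTSecuritySolutionModel, error) {
+	client := NewIotSecuritySolutionClient(subscriptionID, ascLocation)
+	client.Authorizer = authorizer
+	return client.Get(ctx, resourceGroup, solutionName)
+}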
+
+// Update updates existing security solution tags or user-defined resources. To update other fields, use the
+// Create method.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// solutionName - the solution manager name
+// updateIotSecuritySolutionData - the security solution data
+func (client IotSecuritySolutionClient) Update(ctx context.Context, resourceGroupName string, solutionName string, updateIotSecuritySolutionData UpdateIotSecuritySolutionData) (result IoTSecuritySolutionModel, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IotSecuritySolutionClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IotSecuritySolutionClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, solutionName, updateIotSecuritySolutionData)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IotSecuritySolutionClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client IotSecuritySolutionClient) UpdatePreparer(ctx context.Context, resourceGroupName string, solutionName string, updateIotSecuritySolutionData UpdateIotSecuritySolutionData) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "solutionName": autorest.Encode("path", solutionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}", pathParameters),
+ autorest.WithJSON(updateIotSecuritySolutionData),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client IotSecuritySolutionClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client IotSecuritySolutionClient) UpdateResponder(resp *http.Response) (result IoTSecuritySolutionModel, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
diff --git a/services/preview/security/mgmt/v2.0/security/iotsecuritysolutions.go b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutions.go
new file mode 100644
index 000000000000..14fbfd273a72
--- /dev/null
+++ b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutions.go
@@ -0,0 +1,163 @@
+package security
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// IoTSecuritySolutionsClient is the API spec for Microsoft.Security (Azure Security Center) resource provider
+type IoTSecuritySolutionsClient struct {
+ BaseClient
+}
+
+// NewIoTSecuritySolutionsClient creates an instance of the IoTSecuritySolutionsClient client.
+func NewIoTSecuritySolutionsClient(subscriptionID string, ascLocation string) IoTSecuritySolutionsClient {
+ return NewIoTSecuritySolutionsClientWithBaseURI(DefaultBaseURI, subscriptionID, ascLocation)
+}
+
+// NewIoTSecuritySolutionsClientWithBaseURI creates an instance of the IoTSecuritySolutionsClient client.
+func NewIoTSecuritySolutionsClientWithBaseURI(baseURI string, subscriptionID string, ascLocation string) IoTSecuritySolutionsClient {
+ return IoTSecuritySolutionsClient{NewWithBaseURI(baseURI, subscriptionID, ascLocation)}
+}
+
+// List lists the IoT security solutions in the subscription.
+// Parameters:
+// filter - filters the security solutions using OData syntax; filtering by iotHubs is supported.
+func (client IoTSecuritySolutionsClient) List(ctx context.Context, filter string) (result IoTSecuritySolutionsListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsClient.List")
+ defer func() {
+ sc := -1
+ if result.itssl.Response.Response != nil {
+ sc = result.itssl.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IoTSecuritySolutionsClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.itssl.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.itssl, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client IoTSecuritySolutionsClient) ListPreparer(ctx context.Context, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Security/iotSecuritySolutions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client IoTSecuritySolutionsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client IoTSecuritySolutionsClient) ListResponder(resp *http.Response) (result IoTSecuritySolutionsList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client IoTSecuritySolutionsClient) listNextResults(ctx context.Context, lastResults IoTSecuritySolutionsList) (result IoTSecuritySolutionsList, err error) {
+ req, err := lastResults.ioTSecuritySolutionsListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client IoTSecuritySolutionsClient) ListComplete(ctx context.Context, filter string) (result IoTSecuritySolutionsListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, filter)
+ return
+}
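
A minimal usage sketch for the new subscription-scoped List/ListComplete pair. It assumes the `IoTSecuritySolutionsClient` constructor takes `(subscriptionID, ascLocation)` like the other clients added in this change, and that credentials are available to `auth.NewAuthorizerFromEnvironment`; the subscription ID and ASC location values are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	subscriptionID := "<subscription id>" // placeholder
	ascLocation := "<asc location>"       // placeholder

	// Environment-based authorizer (reads the usual AZURE_* variables).
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := security.NewIoTSecuritySolutionsClient(subscriptionID, ascLocation)
	client.Authorizer = authorizer

	// ListComplete hides the paging: the iterator calls listNextResults as needed.
	iter, err := client.ListComplete(context.Background(), "" /* optional $filter */)
	if err != nil {
		log.Fatal(err)
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(context.Background()); err != nil {
			log.Fatal(err)
		}
	}
}
```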
diff --git a/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalytics.go b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalytics.go
new file mode 100644
index 000000000000..d242868be31b
--- /dev/null
+++ b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalytics.go
@@ -0,0 +1,219 @@
+package security
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// IoTSecuritySolutionsAnalyticsClient is the API spec for Microsoft.Security (Azure Security Center) resource provider
+type IoTSecuritySolutionsAnalyticsClient struct {
+ BaseClient
+}
+
+// NewIoTSecuritySolutionsAnalyticsClient creates an instance of the IoTSecuritySolutionsAnalyticsClient client.
+func NewIoTSecuritySolutionsAnalyticsClient(subscriptionID string, ascLocation string) IoTSecuritySolutionsAnalyticsClient {
+ return NewIoTSecuritySolutionsAnalyticsClientWithBaseURI(DefaultBaseURI, subscriptionID, ascLocation)
+}
+
+// NewIoTSecuritySolutionsAnalyticsClientWithBaseURI creates an instance of the IoTSecuritySolutionsAnalyticsClient
+// client.
+func NewIoTSecuritySolutionsAnalyticsClientWithBaseURI(baseURI string, subscriptionID string, ascLocation string) IoTSecuritySolutionsAnalyticsClient {
+ return IoTSecuritySolutionsAnalyticsClient{NewWithBaseURI(baseURI, subscriptionID, ascLocation)}
+}
+
+// GetAll lists all security analytics of a security solution
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// solutionName - the solution manager name
+func (client IoTSecuritySolutionsAnalyticsClient) GetAll(ctx context.Context, resourceGroupName string, solutionName string) (result IoTSecuritySolutionAnalyticsModelList, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsAnalyticsClient.GetAll")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IoTSecuritySolutionsAnalyticsClient", "GetAll", err.Error())
+ }
+
+ req, err := client.GetAllPreparer(ctx, resourceGroupName, solutionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsClient", "GetAll", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAllSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsClient", "GetAll", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAllResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsClient", "GetAll", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAllPreparer prepares the GetAll request.
+func (client IoTSecuritySolutionsAnalyticsClient) GetAllPreparer(ctx context.Context, resourceGroupName string, solutionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "solutionName": autorest.Encode("path", solutionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/analyticsModels", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAllSender sends the GetAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client IoTSecuritySolutionsAnalyticsClient) GetAllSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAllResponder handles the response to the GetAll request. The method always
+// closes the http.Response Body.
+func (client IoTSecuritySolutionsAnalyticsClient) GetAllResponder(resp *http.Response) (result IoTSecuritySolutionAnalyticsModelList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetDefault gets the default security analytics of a security solution
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// solutionName - the solution manager name
+func (client IoTSecuritySolutionsAnalyticsClient) GetDefault(ctx context.Context, resourceGroupName string, solutionName string) (result IoTSecuritySolutionAnalyticsModel, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsAnalyticsClient.GetDefault")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IoTSecuritySolutionsAnalyticsClient", "GetDefault", err.Error())
+ }
+
+ req, err := client.GetDefaultPreparer(ctx, resourceGroupName, solutionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsClient", "GetDefault", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetDefaultSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsClient", "GetDefault", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetDefaultResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsClient", "GetDefault", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetDefaultPreparer prepares the GetDefault request.
+func (client IoTSecuritySolutionsAnalyticsClient) GetDefaultPreparer(ctx context.Context, resourceGroupName string, solutionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "solutionName": autorest.Encode("path", solutionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/analyticsModels/default", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetDefaultSender sends the GetDefault request. The method will close the
+// http.Response Body if it receives an error.
+func (client IoTSecuritySolutionsAnalyticsClient) GetDefaultSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetDefaultResponder handles the response to the GetDefault request. The method always
+// closes the http.Response Body.
+func (client IoTSecuritySolutionsAnalyticsClient) GetDefaultResponder(resp *http.Response) (result IoTSecuritySolutionAnalyticsModel, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
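
A short sketch of the new analytics client, assuming construction and authorization as in the previous example; `resourceGroup` and `solutionName` are placeholders. `GetDefault` returns the default analytics model, while `GetAll` returns the full (non-paged) list.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"
)

// printAnalytics prints the default analytics model of a solution followed by the
// full list of models. The client is assumed to be constructed and authorized already.
func printAnalytics(ctx context.Context, client security.IoTSecuritySolutionsAnalyticsClient, resourceGroup, solutionName string) error {
	model, err := client.GetDefault(ctx, resourceGroup, solutionName)
	if err != nil {
		return err
	}
	fmt.Printf("default analytics model: %+v\n", model)

	all, err := client.GetAll(ctx, resourceGroup, solutionName)
	if err != nil {
		return err
	}
	fmt.Printf("all analytics models: %+v\n", all)
	return nil
}
```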
diff --git a/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsaggregatedalert.go b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsaggregatedalert.go
new file mode 100644
index 000000000000..07196e670723
--- /dev/null
+++ b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsaggregatedalert.go
@@ -0,0 +1,224 @@
+package security
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// IoTSecuritySolutionsAnalyticsAggregatedAlertClient is the API spec for Microsoft.Security (Azure Security Center)
+// resource provider
+type IoTSecuritySolutionsAnalyticsAggregatedAlertClient struct {
+ BaseClient
+}
+
+// NewIoTSecuritySolutionsAnalyticsAggregatedAlertClient creates an instance of the
+// IoTSecuritySolutionsAnalyticsAggregatedAlertClient client.
+func NewIoTSecuritySolutionsAnalyticsAggregatedAlertClient(subscriptionID string, ascLocation string) IoTSecuritySolutionsAnalyticsAggregatedAlertClient {
+ return NewIoTSecuritySolutionsAnalyticsAggregatedAlertClientWithBaseURI(DefaultBaseURI, subscriptionID, ascLocation)
+}
+
+// NewIoTSecuritySolutionsAnalyticsAggregatedAlertClientWithBaseURI creates an instance of the
+// IoTSecuritySolutionsAnalyticsAggregatedAlertClient client.
+func NewIoTSecuritySolutionsAnalyticsAggregatedAlertClientWithBaseURI(baseURI string, subscriptionID string, ascLocation string) IoTSecuritySolutionsAnalyticsAggregatedAlertClient {
+ return IoTSecuritySolutionsAnalyticsAggregatedAlertClient{NewWithBaseURI(baseURI, subscriptionID, ascLocation)}
+}
+
+// Dismiss dismisses an aggregated alert of a security solution
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// solutionName - the solution manager name
+// aggregatedAlertName - identifier of the aggregated alert
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertClient) Dismiss(ctx context.Context, resourceGroupName string, solutionName string, aggregatedAlertName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsAnalyticsAggregatedAlertClient.Dismiss")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient", "Dismiss", err.Error())
+ }
+
+ req, err := client.DismissPreparer(ctx, resourceGroupName, solutionName, aggregatedAlertName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient", "Dismiss", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DismissSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient", "Dismiss", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DismissResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient", "Dismiss", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DismissPreparer prepares the Dismiss request.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertClient) DismissPreparer(ctx context.Context, resourceGroupName string, solutionName string, aggregatedAlertName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "aggregatedAlertName": autorest.Encode("path", aggregatedAlertName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "solutionName": autorest.Encode("path", solutionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/analyticsModels/default/aggregatedAlerts/{aggregatedAlertName}/dismiss", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DismissSender sends the Dismiss request. The method will close the
+// http.Response Body if it receives an error.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertClient) DismissSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DismissResponder handles the response to the Dismiss request. The method always
+// closes the http.Response Body.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertClient) DismissResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets a single aggregated alert of a security solution
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// solutionName - the solution manager name
+// aggregatedAlertName - identifier of the aggregated alert
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertClient) Get(ctx context.Context, resourceGroupName string, solutionName string, aggregatedAlertName string) (result IoTSecurityAggregatedAlert, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsAnalyticsAggregatedAlertClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, solutionName, aggregatedAlertName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertClient) GetPreparer(ctx context.Context, resourceGroupName string, solutionName string, aggregatedAlertName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "aggregatedAlertName": autorest.Encode("path", aggregatedAlertName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "solutionName": autorest.Encode("path", solutionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/analyticsModels/default/aggregatedAlerts/{aggregatedAlertName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertClient) GetResponder(resp *http.Response) (result IoTSecurityAggregatedAlert, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
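
A hedged sketch of fetching and then dismissing a single aggregated alert with the new client; all identifiers are placeholders. `Dismiss` returns a bare `autorest.Response`, so only the HTTP status is available on success.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"
)

// dismissAlert looks up one aggregated alert and dismisses it, reporting the HTTP
// status of the dismiss call. The client is assumed to be constructed and authorized.
func dismissAlert(ctx context.Context, client security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient, resourceGroup, solutionName, alertName string) error {
	alert, err := client.Get(ctx, resourceGroup, solutionName, alertName)
	if err != nil {
		return err
	}
	fmt.Printf("alert before dismissal: %+v\n", alert)

	resp, err := client.Dismiss(ctx, resourceGroup, solutionName, alertName)
	if err != nil {
		return err
	}
	fmt.Printf("dismissed %s: HTTP %d\n", alertName, resp.StatusCode)
	return nil
}
```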
diff --git a/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsaggregatedalerts.go b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsaggregatedalerts.go
new file mode 100644
index 000000000000..0e295e14f36b
--- /dev/null
+++ b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsaggregatedalerts.go
@@ -0,0 +1,175 @@
+package security
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// IoTSecuritySolutionsAnalyticsAggregatedAlertsClient is the API spec for Microsoft.Security (Azure Security Center)
+// resource provider
+type IoTSecuritySolutionsAnalyticsAggregatedAlertsClient struct {
+ BaseClient
+}
+
+// NewIoTSecuritySolutionsAnalyticsAggregatedAlertsClient creates an instance of the
+// IoTSecuritySolutionsAnalyticsAggregatedAlertsClient client.
+func NewIoTSecuritySolutionsAnalyticsAggregatedAlertsClient(subscriptionID string, ascLocation string) IoTSecuritySolutionsAnalyticsAggregatedAlertsClient {
+ return NewIoTSecuritySolutionsAnalyticsAggregatedAlertsClientWithBaseURI(DefaultBaseURI, subscriptionID, ascLocation)
+}
+
+// NewIoTSecuritySolutionsAnalyticsAggregatedAlertsClientWithBaseURI creates an instance of the
+// IoTSecuritySolutionsAnalyticsAggregatedAlertsClient client.
+func NewIoTSecuritySolutionsAnalyticsAggregatedAlertsClientWithBaseURI(baseURI string, subscriptionID string, ascLocation string) IoTSecuritySolutionsAnalyticsAggregatedAlertsClient {
+ return IoTSecuritySolutionsAnalyticsAggregatedAlertsClient{NewWithBaseURI(baseURI, subscriptionID, ascLocation)}
+}
+
+// List lists the aggregated alerts of a security solution
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// solutionName - the solution manager name
+// top - the number of results to retrieve.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertsClient) List(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (result IoTSecurityAggregatedAlertListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsAnalyticsAggregatedAlertsClient.List")
+ defer func() {
+ sc := -1
+ if result.itsaal.Response.Response != nil {
+ sc = result.itsaal.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IoTSecuritySolutionsAnalyticsAggregatedAlertsClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, solutionName, top)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.itsaal.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.itsaal, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertsClient) ListPreparer(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "solutionName": autorest.Encode("path", solutionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/analyticsModels/default/aggregatedAlerts", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertsClient) ListResponder(resp *http.Response) (result IoTSecurityAggregatedAlertList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertsClient) listNextResults(ctx context.Context, lastResults IoTSecurityAggregatedAlertList) (result IoTSecurityAggregatedAlertList, err error) {
+ req, err := lastResults.ioTSecurityAggregatedAlertListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsAggregatedAlertsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client IoTSecuritySolutionsAnalyticsAggregatedAlertsClient) ListComplete(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (result IoTSecurityAggregatedAlertListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsAnalyticsAggregatedAlertsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, solutionName, top)
+ return
+}
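
A sketch of paging through aggregated alerts with the optional `$top` page size, assuming `to.Int32Ptr` from `autorest/to` for the `*int32` parameter; identifiers are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"
	"github.com/Azure/go-autorest/autorest/to"
)

// listAggregatedAlerts pages through the aggregated alerts of a solution, requesting
// pages of pageSize items via the optional $top query parameter.
func listAggregatedAlerts(ctx context.Context, client security.IoTSecuritySolutionsAnalyticsAggregatedAlertsClient, resourceGroup, solutionName string, pageSize int32) error {
	iter, err := client.ListComplete(ctx, resourceGroup, solutionName, to.Int32Ptr(pageSize))
	if err != nil {
		return err
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```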
diff --git a/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsrecommendation.go b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsrecommendation.go
new file mode 100644
index 000000000000..ff3fad682371
--- /dev/null
+++ b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsrecommendation.go
@@ -0,0 +1,135 @@
+package security
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// IoTSecuritySolutionsAnalyticsRecommendationClient is the API spec for Microsoft.Security (Azure Security Center)
+// resource provider
+type IoTSecuritySolutionsAnalyticsRecommendationClient struct {
+ BaseClient
+}
+
+// NewIoTSecuritySolutionsAnalyticsRecommendationClient creates an instance of the
+// IoTSecuritySolutionsAnalyticsRecommendationClient client.
+func NewIoTSecuritySolutionsAnalyticsRecommendationClient(subscriptionID string, ascLocation string) IoTSecuritySolutionsAnalyticsRecommendationClient {
+ return NewIoTSecuritySolutionsAnalyticsRecommendationClientWithBaseURI(DefaultBaseURI, subscriptionID, ascLocation)
+}
+
+// NewIoTSecuritySolutionsAnalyticsRecommendationClientWithBaseURI creates an instance of the
+// IoTSecuritySolutionsAnalyticsRecommendationClient client.
+func NewIoTSecuritySolutionsAnalyticsRecommendationClientWithBaseURI(baseURI string, subscriptionID string, ascLocation string) IoTSecuritySolutionsAnalyticsRecommendationClient {
+ return IoTSecuritySolutionsAnalyticsRecommendationClient{NewWithBaseURI(baseURI, subscriptionID, ascLocation)}
+}
+
+// Get gets a single aggregated recommendation of a security solution
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// solutionName - the solution manager name
+// aggregatedRecommendationName - identifier of the aggregated recommendation
+func (client IoTSecuritySolutionsAnalyticsRecommendationClient) Get(ctx context.Context, resourceGroupName string, solutionName string, aggregatedRecommendationName string) (result IoTSecurityAggregatedRecommendation, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsAnalyticsRecommendationClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IoTSecuritySolutionsAnalyticsRecommendationClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, solutionName, aggregatedRecommendationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsRecommendationClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsRecommendationClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsRecommendationClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client IoTSecuritySolutionsAnalyticsRecommendationClient) GetPreparer(ctx context.Context, resourceGroupName string, solutionName string, aggregatedRecommendationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "aggregatedRecommendationName": autorest.Encode("path", aggregatedRecommendationName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "solutionName": autorest.Encode("path", solutionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/analyticsModels/default/aggregatedRecommendations/{aggregatedRecommendationName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client IoTSecuritySolutionsAnalyticsRecommendationClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client IoTSecuritySolutionsAnalyticsRecommendationClient) GetResponder(resp *http.Response) (result IoTSecurityAggregatedRecommendation, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
diff --git a/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsrecommendations.go b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsrecommendations.go
new file mode 100644
index 000000000000..fbda8ca7f69e
--- /dev/null
+++ b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsanalyticsrecommendations.go
@@ -0,0 +1,175 @@
+package security
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// IoTSecuritySolutionsAnalyticsRecommendationsClient is the API spec for Microsoft.Security (Azure Security Center)
+// resource provider
+type IoTSecuritySolutionsAnalyticsRecommendationsClient struct {
+ BaseClient
+}
+
+// NewIoTSecuritySolutionsAnalyticsRecommendationsClient creates an instance of the
+// IoTSecuritySolutionsAnalyticsRecommendationsClient client.
+func NewIoTSecuritySolutionsAnalyticsRecommendationsClient(subscriptionID string, ascLocation string) IoTSecuritySolutionsAnalyticsRecommendationsClient {
+ return NewIoTSecuritySolutionsAnalyticsRecommendationsClientWithBaseURI(DefaultBaseURI, subscriptionID, ascLocation)
+}
+
+// NewIoTSecuritySolutionsAnalyticsRecommendationsClientWithBaseURI creates an instance of the
+// IoTSecuritySolutionsAnalyticsRecommendationsClient client.
+func NewIoTSecuritySolutionsAnalyticsRecommendationsClientWithBaseURI(baseURI string, subscriptionID string, ascLocation string) IoTSecuritySolutionsAnalyticsRecommendationsClient {
+ return IoTSecuritySolutionsAnalyticsRecommendationsClient{NewWithBaseURI(baseURI, subscriptionID, ascLocation)}
+}
+
+// List lists the aggregated recommendations of a security solution
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// solutionName - the solution manager name
+// top - the number of results to retrieve.
+func (client IoTSecuritySolutionsAnalyticsRecommendationsClient) List(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (result IoTSecurityAggregatedRecommendationListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsAnalyticsRecommendationsClient.List")
+ defer func() {
+ sc := -1
+ if result.itsarl.Response.Response != nil {
+ sc = result.itsarl.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IoTSecuritySolutionsAnalyticsRecommendationsClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, solutionName, top)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsRecommendationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.itsarl.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsRecommendationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.itsarl, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsRecommendationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client IoTSecuritySolutionsAnalyticsRecommendationsClient) ListPreparer(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "solutionName": autorest.Encode("path", solutionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/analyticsModels/default/aggregatedRecommendations", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client IoTSecuritySolutionsAnalyticsRecommendationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client IoTSecuritySolutionsAnalyticsRecommendationsClient) ListResponder(resp *http.Response) (result IoTSecurityAggregatedRecommendationList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client IoTSecuritySolutionsAnalyticsRecommendationsClient) listNextResults(ctx context.Context, lastResults IoTSecurityAggregatedRecommendationList) (result IoTSecurityAggregatedRecommendationList, err error) {
+ req, err := lastResults.ioTSecurityAggregatedRecommendationListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsRecommendationsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsRecommendationsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsAnalyticsRecommendationsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client IoTSecuritySolutionsAnalyticsRecommendationsClient) ListComplete(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (result IoTSecurityAggregatedRecommendationListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsAnalyticsRecommendationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, solutionName, top)
+ return
+}
diff --git a/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsresourcegroup.go b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsresourcegroup.go
new file mode 100644
index 000000000000..71faba118a1e
--- /dev/null
+++ b/services/preview/security/mgmt/v2.0/security/iotsecuritysolutionsresourcegroup.go
@@ -0,0 +1,173 @@
+package security
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// IoTSecuritySolutionsResourceGroupClient is the API spec for Microsoft.Security (Azure Security Center) resource
+// provider
+type IoTSecuritySolutionsResourceGroupClient struct {
+ BaseClient
+}
+
+// NewIoTSecuritySolutionsResourceGroupClient creates an instance of the IoTSecuritySolutionsResourceGroupClient
+// client.
+func NewIoTSecuritySolutionsResourceGroupClient(subscriptionID string, ascLocation string) IoTSecuritySolutionsResourceGroupClient {
+ return NewIoTSecuritySolutionsResourceGroupClientWithBaseURI(DefaultBaseURI, subscriptionID, ascLocation)
+}
+
+// NewIoTSecuritySolutionsResourceGroupClientWithBaseURI creates an instance of the
+// IoTSecuritySolutionsResourceGroupClient client.
+func NewIoTSecuritySolutionsResourceGroupClientWithBaseURI(baseURI string, subscriptionID string, ascLocation string) IoTSecuritySolutionsResourceGroupClient {
+ return IoTSecuritySolutionsResourceGroupClient{NewWithBaseURI(baseURI, subscriptionID, ascLocation)}
+}
+
+// List lists the security solutions in the specified resource group
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// filter - filter the security solutions with OData syntax; filtering by iotHubs is supported
+func (client IoTSecuritySolutionsResourceGroupClient) List(ctx context.Context, resourceGroupName string, filter string) (result IoTSecuritySolutionsListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsResourceGroupClient.List")
+ defer func() {
+ sc := -1
+ if result.itssl.Response.Response != nil {
+ sc = result.itssl.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("security.IoTSecuritySolutionsResourceGroupClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsResourceGroupClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.itssl.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsResourceGroupClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.itssl, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsResourceGroupClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client IoTSecuritySolutionsResourceGroupClient) ListPreparer(ctx context.Context, resourceGroupName string, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-08-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client IoTSecuritySolutionsResourceGroupClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client IoTSecuritySolutionsResourceGroupClient) ListResponder(resp *http.Response) (result IoTSecuritySolutionsList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client IoTSecuritySolutionsResourceGroupClient) listNextResults(ctx context.Context, lastResults IoTSecuritySolutionsList) (result IoTSecuritySolutionsList, err error) {
+ req, err := lastResults.ioTSecuritySolutionsListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsResourceGroupClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsResourceGroupClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "security.IoTSecuritySolutionsResourceGroupClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client IoTSecuritySolutionsResourceGroupClient) ListComplete(ctx context.Context, resourceGroupName string, filter string) (result IoTSecuritySolutionsListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsResourceGroupClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, filter)
+ return
+}
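
A sketch of the resource-group-scoped listing, again assuming an already-authorized client; the exact OData syntax for filtering by iotHubs is service-defined, so pass an empty filter to list everything.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"
)

// listSolutionsInResourceGroup enumerates the IoT security solutions of one resource
// group, crossing page boundaries via the iterator returned by ListComplete.
func listSolutionsInResourceGroup(ctx context.Context, client security.IoTSecuritySolutionsResourceGroupClient, resourceGroup, filter string) error {
	iter, err := client.ListComplete(ctx, resourceGroup, filter)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```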
diff --git a/services/preview/security/mgmt/v2.0/security/models.go b/services/preview/security/mgmt/v2.0/security/models.go
index d2369e5e53bb..01ae7161a81d 100644
--- a/services/preview/security/mgmt/v2.0/security/models.go
+++ b/services/preview/security/mgmt/v2.0/security/models.go
@@ -189,6 +189,19 @@ func PossibleConnectionTypeValues() []ConnectionType {
return []ConnectionType{External, Internal}
}
+// DataSource enumerates the values for data source.
+type DataSource string
+
+const (
+ // TwinData Devices twin data
+ TwinData DataSource = "TwinData"
+)
+
+// PossibleDataSourceValues returns an array of possible values for the DataSource const type.
+func PossibleDataSourceValues() []DataSource {
+ return []DataSource{TwinData}
+}
+
// Direction enumerates the values for direction.
type Direction string
@@ -234,6 +247,19 @@ func PossibleEnforcementMode1Values() []EnforcementMode1 {
return []EnforcementMode1{EnforcementMode1Audit, EnforcementMode1Enforce}
}
+// ExportData enumerates the values for export data.
+type ExportData string
+
+const (
+ // RawEvents Agent raw events
+ RawEvents ExportData = "RawEvents"
+)
+
+// PossibleExportDataValues returns an array of possible values for the ExportData const type.
+func PossibleExportDataValues() []ExportData {
+ return []ExportData{RawEvents}
+}
+
// ExternalSecuritySolutionKind enumerates the values for external security solution kind.
type ExternalSecuritySolutionKind string
@@ -401,6 +427,21 @@ func PossibleRecommendationAction1Values() []RecommendationAction1 {
return []RecommendationAction1{RecommendationAction1Add, RecommendationAction1Recommended, RecommendationAction1Remove}
}
+// RecommendationConfigStatus enumerates the values for recommendation config status.
+type RecommendationConfigStatus string
+
+const (
+ // Disabled ...
+ Disabled RecommendationConfigStatus = "Disabled"
+ // Enabled ...
+ Enabled RecommendationConfigStatus = "Enabled"
+)
+
+// PossibleRecommendationConfigStatusValues returns an array of possible values for the RecommendationConfigStatus const type.
+func PossibleRecommendationConfigStatusValues() []RecommendationConfigStatus {
+ return []RecommendationConfigStatus{Disabled, Enabled}
+}
+
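
The generated `Possible*Values` helpers can back simple input validation; a minimal sketch using the new `RecommendationConfigStatus` values:

```go
package main

import "github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"

// isValidRecommendationConfigStatus reports whether s matches one of the generated
// RecommendationConfigStatus constants (currently Enabled and Disabled).
func isValidRecommendationConfigStatus(s string) bool {
	for _, v := range security.PossibleRecommendationConfigStatusValues() {
		if string(v) == s {
			return true
		}
	}
	return false
}
```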
// RecommendationStatus enumerates the values for recommendation status.
type RecommendationStatus string
@@ -420,6 +461,61 @@ func PossibleRecommendationStatusValues() []RecommendationStatus {
return []RecommendationStatus{RecommendationStatusNoStatus, RecommendationStatusNotAvailable, RecommendationStatusNotRecommended, RecommendationStatusRecommended}
}
+// RecommendationType enumerates the values for recommendation type.
+type RecommendationType string
+
+const (
+	// IoTACRAuthentication Authentication schema used to pull an edge module from an ACR repository does not
+	// use Service Principal Authentication.
+ IoTACRAuthentication RecommendationType = "IoT_ACRAuthentication"
+ // IoTAgentSendsUnutilizedMessages IoT agent message size capacity is currently underutilized, causing an
+ // increase in the number of sent messages. Adjust message intervals for better utilization.
+ IoTAgentSendsUnutilizedMessages RecommendationType = "IoT_AgentSendsUnutilizedMessages"
+ // IoTBaseline Identified security related system configuration issues.
+ IoTBaseline RecommendationType = "IoT_Baseline"
+ // IoTEdgeHubMemOptimize You can optimize Edge Hub memory usage by turning off protocol heads for any
+ // protocols not used by Edge modules in your solution.
+ IoTEdgeHubMemOptimize RecommendationType = "IoT_EdgeHubMemOptimize"
+ // IoTEdgeLoggingOptions Logging is disabled for this edge module.
+ IoTEdgeLoggingOptions RecommendationType = "IoT_EdgeLoggingOptions"
+	// IoTInconsistentModuleSettings A minority of devices within a device security group have Edge Module
+	// settings that are inconsistent with the rest of their group.
+ IoTInconsistentModuleSettings RecommendationType = "IoT_InconsistentModuleSettings"
+ // IoTInstallAgent Install the Azure Security of Things Agent.
+ IoTInstallAgent RecommendationType = "IoT_InstallAgent"
+ // IoTIPFilterDenyAll IP Filter Configuration should have rules defined for allowed traffic and should deny
+ // all other traffic by default.
+ IoTIPFilterDenyAll RecommendationType = "IoT_IPFilter_DenyAll"
+	// IoTIPFilterPermissiveRule An Allow IP Filter rule's source IP range is too large. Overly permissive
+	// rules might expose your IoT hub to malicious actors.
+ IoTIPFilterPermissiveRule RecommendationType = "IoT_IPFilter_PermissiveRule"
+ // IoTOpenPorts A listening endpoint was found on the device.
+ IoTOpenPorts RecommendationType = "IoT_OpenPorts"
+ // IoTPermissiveFirewallPolicy An Allowed firewall policy was found (INPUT/OUTPUT). The policy should Deny
+ // all traffic by default and define rules to allow necessary communication to/from the device.
+ IoTPermissiveFirewallPolicy RecommendationType = "IoT_PermissiveFirewallPolicy"
+ // IoTPermissiveInputFirewallRules A rule in the firewall has been found that contains a permissive pattern
+ // for a wide range of IP addresses or Ports.
+ IoTPermissiveInputFirewallRules RecommendationType = "IoT_PermissiveInputFirewallRules"
+ // IoTPermissiveOutputFirewallRules A rule in the firewall has been found that contains a permissive
+ // pattern for a wide range of IP addresses or Ports.
+ IoTPermissiveOutputFirewallRules RecommendationType = "IoT_PermissiveOutputFirewallRules"
+ // IoTPrivilegedDockerOptions Edge module is configured to run in privileged mode, with extensive Linux
+ // capabilities or with host-level network access (send/receive data to host machine).
+ IoTPrivilegedDockerOptions RecommendationType = "IoT_PrivilegedDockerOptions"
+	// IoTSharedCredentials The same IoT Hub authentication credentials are used by multiple devices. This
+	// could indicate an illegitimate device impersonating a legitimate device, and it exposes the risk of
+	// device impersonation by an attacker.
+ IoTSharedCredentials RecommendationType = "IoT_SharedCredentials"
+ // IoTVulnerableTLSCipherSuite Insecure TLS configurations detected. Immediate upgrade recommended.
+ IoTVulnerableTLSCipherSuite RecommendationType = "IoT_VulnerableTLSCipherSuite"
+)
+
+// PossibleRecommendationTypeValues returns an array of possible values for the RecommendationType const type.
+func PossibleRecommendationTypeValues() []RecommendationType {
+ return []RecommendationType{IoTACRAuthentication, IoTAgentSendsUnutilizedMessages, IoTBaseline, IoTEdgeHubMemOptimize, IoTEdgeLoggingOptions, IoTInconsistentModuleSettings, IoTInstallAgent, IoTIPFilterDenyAll, IoTIPFilterPermissiveRule, IoTOpenPorts, IoTPermissiveFirewallPolicy, IoTPermissiveInputFirewallRules, IoTPermissiveOutputFirewallRules, IoTPrivilegedDockerOptions, IoTSharedCredentials, IoTVulnerableTLSCipherSuite}
+}
+
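The `Possible*Values` helpers generated above are convenient for validating free-form input before it goes into a request. A small illustrative sketch (the helper name is hypothetical, not part of the SDK):

```go
package sample

import "github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"

// parseRecommendationType maps a raw string onto one of the generated
// RecommendationType constants, reporting whether it is a known value.
func parseRecommendationType(v string) (security.RecommendationType, bool) {
	for _, rt := range security.PossibleRecommendationTypeValues() {
		if string(rt) == v {
			return rt, true
		}
	}
	return "", false
}
```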
// ReportedSeverity enumerates the values for reported severity.
type ReportedSeverity string
@@ -454,6 +550,21 @@ func PossibleSettingKindValues() []SettingKind {
return []SettingKind{SettingKindAlertSuppressionSetting, SettingKindDataExportSetting}
}
+// SolutionStatus enumerates the values for solution status.
+type SolutionStatus string
+
+const (
+ // SolutionStatusDisabled ...
+ SolutionStatusDisabled SolutionStatus = "Disabled"
+ // SolutionStatusEnabled ...
+ SolutionStatusEnabled SolutionStatus = "Enabled"
+)
+
+// PossibleSolutionStatusValues returns an array of possible values for the SolutionStatus const type.
+func PossibleSolutionStatusValues() []SolutionStatus {
+ return []SolutionStatus{SolutionStatusDisabled, SolutionStatusEnabled}
+}
+
// SourceSystem enumerates the values for source system.
type SourceSystem string
@@ -4212,26 +4323,114 @@ type InformationType struct {
Keywords *[]InformationProtectionKeyword `json:"keywords,omitempty"`
}
-// JitNetworkAccessPoliciesList ...
-type JitNetworkAccessPoliciesList struct {
+// IoTSecurityAggregatedAlert security Solution Aggregated Alert information
+type IoTSecurityAggregatedAlert struct {
autorest.Response `json:"-"`
- Value *[]JitNetworkAccessPolicy `json:"value,omitempty"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+ // IoTSecurityAggregatedAlertProperties - Security Solution Aggregated Alert data
+ *IoTSecurityAggregatedAlertProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for IoTSecurityAggregatedAlert.
+func (itsaa IoTSecurityAggregatedAlert) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if itsaa.Tags != nil {
+ objectMap["tags"] = itsaa.Tags
+ }
+ if itsaa.IoTSecurityAggregatedAlertProperties != nil {
+ objectMap["properties"] = itsaa.IoTSecurityAggregatedAlertProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for IoTSecurityAggregatedAlert struct.
+func (itsaa *IoTSecurityAggregatedAlert) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ itsaa.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ itsaa.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ itsaa.Type = &typeVar
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ itsaa.Tags = tags
+ }
+ case "properties":
+ if v != nil {
+ var ioTSecurityAggregatedAlertProperties IoTSecurityAggregatedAlertProperties
+ err = json.Unmarshal(*v, &ioTSecurityAggregatedAlertProperties)
+ if err != nil {
+ return err
+ }
+ itsaa.IoTSecurityAggregatedAlertProperties = &ioTSecurityAggregatedAlertProperties
+ }
+ }
+ }
+
+ return nil
+}
+
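The custom marshaler/unmarshaler pair above exists because the wire format nests the alert data under a `properties` envelope, while the Go type embeds `*IoTSecurityAggregatedAlertProperties` (defined further below) so its fields are promoted onto the alert itself. A hedged decoding sketch; the JSON body is illustrative, not captured from the service:

```go
package sample

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"
)

// decodeAggregatedAlert shows the nested "properties" object being flattened
// onto the IoTSecurityAggregatedAlert struct by the custom UnmarshalJSON.
func decodeAggregatedAlert() error {
	const body = `{
		"name": "IoT_Bruteforce_Fail/2019-02-02",
		"properties": {"alertType": "IoT_Bruteforce_Fail", "count": 25}
	}`
	var alert security.IoTSecurityAggregatedAlert
	if err := json.Unmarshal([]byte(body), &alert); err != nil {
		return err
	}
	// AlertType and Count are promoted from the embedded properties struct.
	fmt.Println(*alert.AlertType, *alert.Count)
	return nil
}
```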
+// IoTSecurityAggregatedAlertList list of IoT aggregated security alerts
+type IoTSecurityAggregatedAlertList struct {
+ autorest.Response `json:"-"`
+ // Value - List of aggregated alerts data
+ Value *[]IoTSecurityAggregatedAlert `json:"value,omitempty"`
// NextLink - READ-ONLY; The URI to fetch the next page.
NextLink *string `json:"nextLink,omitempty"`
}
-// JitNetworkAccessPoliciesListIterator provides access to a complete listing of JitNetworkAccessPolicy
-// values.
-type JitNetworkAccessPoliciesListIterator struct {
+// IoTSecurityAggregatedAlertListIterator provides access to a complete listing of
+// IoTSecurityAggregatedAlert values.
+type IoTSecurityAggregatedAlertListIterator struct {
i int
- page JitNetworkAccessPoliciesListPage
+ page IoTSecurityAggregatedAlertListPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *JitNetworkAccessPoliciesListIterator) NextWithContext(ctx context.Context) (err error) {
+func (iter *IoTSecurityAggregatedAlertListIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/JitNetworkAccessPoliciesListIterator.NextWithContext")
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecurityAggregatedAlertListIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
@@ -4256,62 +4455,62 @@ func (iter *JitNetworkAccessPoliciesListIterator) NextWithContext(ctx context.Co
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
-func (iter *JitNetworkAccessPoliciesListIterator) Next() error {
+func (iter *IoTSecurityAggregatedAlertListIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter JitNetworkAccessPoliciesListIterator) NotDone() bool {
+func (iter IoTSecurityAggregatedAlertListIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
-func (iter JitNetworkAccessPoliciesListIterator) Response() JitNetworkAccessPoliciesList {
+func (iter IoTSecurityAggregatedAlertListIterator) Response() IoTSecurityAggregatedAlertList {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
-func (iter JitNetworkAccessPoliciesListIterator) Value() JitNetworkAccessPolicy {
+func (iter IoTSecurityAggregatedAlertListIterator) Value() IoTSecurityAggregatedAlert {
if !iter.page.NotDone() {
- return JitNetworkAccessPolicy{}
+ return IoTSecurityAggregatedAlert{}
}
return iter.page.Values()[iter.i]
}
-// Creates a new instance of the JitNetworkAccessPoliciesListIterator type.
-func NewJitNetworkAccessPoliciesListIterator(page JitNetworkAccessPoliciesListPage) JitNetworkAccessPoliciesListIterator {
- return JitNetworkAccessPoliciesListIterator{page: page}
+// Creates a new instance of the IoTSecurityAggregatedAlertListIterator type.
+func NewIoTSecurityAggregatedAlertListIterator(page IoTSecurityAggregatedAlertListPage) IoTSecurityAggregatedAlertListIterator {
+ return IoTSecurityAggregatedAlertListIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
-func (jnapl JitNetworkAccessPoliciesList) IsEmpty() bool {
- return jnapl.Value == nil || len(*jnapl.Value) == 0
+func (itsaal IoTSecurityAggregatedAlertList) IsEmpty() bool {
+ return itsaal.Value == nil || len(*itsaal.Value) == 0
}
-// jitNetworkAccessPoliciesListPreparer prepares a request to retrieve the next set of results.
+// ioTSecurityAggregatedAlertListPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (jnapl JitNetworkAccessPoliciesList) jitNetworkAccessPoliciesListPreparer(ctx context.Context) (*http.Request, error) {
- if jnapl.NextLink == nil || len(to.String(jnapl.NextLink)) < 1 {
+func (itsaal IoTSecurityAggregatedAlertList) ioTSecurityAggregatedAlertListPreparer(ctx context.Context) (*http.Request, error) {
+ if itsaal.NextLink == nil || len(to.String(itsaal.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
- autorest.WithBaseURL(to.String(jnapl.NextLink)))
+ autorest.WithBaseURL(to.String(itsaal.NextLink)))
}
-// JitNetworkAccessPoliciesListPage contains a page of JitNetworkAccessPolicy values.
-type JitNetworkAccessPoliciesListPage struct {
- fn func(context.Context, JitNetworkAccessPoliciesList) (JitNetworkAccessPoliciesList, error)
- jnapl JitNetworkAccessPoliciesList
+// IoTSecurityAggregatedAlertListPage contains a page of IoTSecurityAggregatedAlert values.
+type IoTSecurityAggregatedAlertListPage struct {
+ fn func(context.Context, IoTSecurityAggregatedAlertList) (IoTSecurityAggregatedAlertList, error)
+ itsaal IoTSecurityAggregatedAlertList
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *JitNetworkAccessPoliciesListPage) NextWithContext(ctx context.Context) (err error) {
+func (page *IoTSecurityAggregatedAlertListPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/JitNetworkAccessPoliciesListPage.NextWithContext")
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecurityAggregatedAlertListPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
@@ -4320,46 +4519,74 @@ func (page *JitNetworkAccessPoliciesListPage) NextWithContext(ctx context.Contex
tracing.EndSpan(ctx, sc, err)
}()
}
- next, err := page.fn(ctx, page.jnapl)
+ next, err := page.fn(ctx, page.itsaal)
if err != nil {
return err
}
- page.jnapl = next
+ page.itsaal = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
-func (page *JitNetworkAccessPoliciesListPage) Next() error {
+func (page *IoTSecurityAggregatedAlertListPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page JitNetworkAccessPoliciesListPage) NotDone() bool {
- return !page.jnapl.IsEmpty()
+func (page IoTSecurityAggregatedAlertListPage) NotDone() bool {
+ return !page.itsaal.IsEmpty()
}
// Response returns the raw server response from the last page request.
-func (page JitNetworkAccessPoliciesListPage) Response() JitNetworkAccessPoliciesList {
- return page.jnapl
+func (page IoTSecurityAggregatedAlertListPage) Response() IoTSecurityAggregatedAlertList {
+ return page.itsaal
}
// Values returns the slice of values for the current page or nil if there are no values.
-func (page JitNetworkAccessPoliciesListPage) Values() []JitNetworkAccessPolicy {
- if page.jnapl.IsEmpty() {
+func (page IoTSecurityAggregatedAlertListPage) Values() []IoTSecurityAggregatedAlert {
+ if page.itsaal.IsEmpty() {
return nil
}
- return *page.jnapl.Value
+ return *page.itsaal.Value
}
-// Creates a new instance of the JitNetworkAccessPoliciesListPage type.
-func NewJitNetworkAccessPoliciesListPage(getNextPage func(context.Context, JitNetworkAccessPoliciesList) (JitNetworkAccessPoliciesList, error)) JitNetworkAccessPoliciesListPage {
- return JitNetworkAccessPoliciesListPage{fn: getNextPage}
+// Creates a new instance of the IoTSecurityAggregatedAlertListPage type.
+func NewIoTSecurityAggregatedAlertListPage(getNextPage func(context.Context, IoTSecurityAggregatedAlertList) (IoTSecurityAggregatedAlertList, error)) IoTSecurityAggregatedAlertListPage {
+ return IoTSecurityAggregatedAlertListPage{fn: getNextPage}
}
-// JitNetworkAccessPolicy ...
-type JitNetworkAccessPolicy struct {
+// IoTSecurityAggregatedAlertProperties security Solution Aggregated Alert data
+type IoTSecurityAggregatedAlertProperties struct {
+ // AlertType - READ-ONLY; Name of the alert type
+ AlertType *string `json:"alertType,omitempty"`
+ // AlertDisplayName - READ-ONLY; Display name of the alert type
+ AlertDisplayName *string `json:"alertDisplayName,omitempty"`
+ // AggregatedDateUtc - READ-ONLY; The date the incidents were detected by the vendor
+ AggregatedDateUtc *date.Date `json:"aggregatedDateUtc,omitempty"`
+ // VendorName - READ-ONLY; Name of the vendor that discovered the incident
+ VendorName *string `json:"vendorName,omitempty"`
+ // ReportedSeverity - READ-ONLY; Estimated severity of this alert. Possible values include: 'Informational', 'Low', 'Medium', 'High'
+ ReportedSeverity ReportedSeverity `json:"reportedSeverity,omitempty"`
+ // RemediationSteps - READ-ONLY; Recommended steps for remediation
+ RemediationSteps *string `json:"remediationSteps,omitempty"`
+ // Description - READ-ONLY; Description of the incident and what it means
+ Description *string `json:"description,omitempty"`
+ // Count - READ-ONLY; Occurrence number of the alert within the aggregated date
+ Count *int32 `json:"count,omitempty"`
+	// EffectedResourceType - READ-ONLY; Azure resource ID of the resource on which the alerts were raised
+ EffectedResourceType *string `json:"effectedResourceType,omitempty"`
+ // SystemSource - READ-ONLY; The type of the alerted resource (Azure, Non-Azure)
+ SystemSource *string `json:"systemSource,omitempty"`
+ // ActionTaken - READ-ONLY; The action that was taken as a response to the alert (Active, Blocked etc.)
+ ActionTaken *string `json:"actionTaken,omitempty"`
+ // LogAnalyticsQuery - READ-ONLY; query in log analytics to get the list of affected devices/alerts
+ LogAnalyticsQuery *string `json:"logAnalyticsQuery,omitempty"`
+}
+
+// IoTSecurityAggregatedRecommendation security Solution Recommendation Information
+type IoTSecurityAggregatedRecommendation struct {
autorest.Response `json:"-"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
@@ -4367,27 +4594,26 @@ type JitNetworkAccessPolicy struct {
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
- // Kind - Kind of the resource
- Kind *string `json:"kind,omitempty"`
- // Location - READ-ONLY; Location where the resource is stored
- Location *string `json:"location,omitempty"`
- *JitNetworkAccessPolicyProperties `json:"properties,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+ // IoTSecurityAggregatedRecommendationProperties - Security Solution data
+ *IoTSecurityAggregatedRecommendationProperties `json:"properties,omitempty"`
}
-// MarshalJSON is the custom marshaler for JitNetworkAccessPolicy.
-func (jnap JitNetworkAccessPolicy) MarshalJSON() ([]byte, error) {
+// MarshalJSON is the custom marshaler for IoTSecurityAggregatedRecommendation.
+func (itsar IoTSecurityAggregatedRecommendation) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
- if jnap.Kind != nil {
- objectMap["kind"] = jnap.Kind
+ if itsar.Tags != nil {
+ objectMap["tags"] = itsar.Tags
}
- if jnap.JitNetworkAccessPolicyProperties != nil {
- objectMap["properties"] = jnap.JitNetworkAccessPolicyProperties
+ if itsar.IoTSecurityAggregatedRecommendationProperties != nil {
+ objectMap["properties"] = itsar.IoTSecurityAggregatedRecommendationProperties
}
return json.Marshal(objectMap)
}
-// UnmarshalJSON is the custom unmarshaler for JitNetworkAccessPolicy struct.
-func (jnap *JitNetworkAccessPolicy) UnmarshalJSON(body []byte) error {
+// UnmarshalJSON is the custom unmarshaler for IoTSecurityAggregatedRecommendation struct.
+func (itsar *IoTSecurityAggregatedRecommendation) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
@@ -4402,7 +4628,7 @@ func (jnap *JitNetworkAccessPolicy) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- jnap.ID = &ID
+ itsar.ID = &ID
}
case "name":
if v != nil {
@@ -4411,7 +4637,7 @@ func (jnap *JitNetworkAccessPolicy) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- jnap.Name = &name
+ itsar.Name = &name
}
case "type":
if v != nil {
@@ -4420,34 +4646,25 @@ func (jnap *JitNetworkAccessPolicy) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- jnap.Type = &typeVar
- }
- case "kind":
- if v != nil {
- var kind string
- err = json.Unmarshal(*v, &kind)
- if err != nil {
- return err
- }
- jnap.Kind = &kind
+ itsar.Type = &typeVar
}
- case "location":
+ case "tags":
if v != nil {
- var location string
- err = json.Unmarshal(*v, &location)
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
- jnap.Location = &location
+ itsar.Tags = tags
}
case "properties":
if v != nil {
- var jitNetworkAccessPolicyProperties JitNetworkAccessPolicyProperties
- err = json.Unmarshal(*v, &jitNetworkAccessPolicyProperties)
+ var ioTSecurityAggregatedRecommendationProperties IoTSecurityAggregatedRecommendationProperties
+ err = json.Unmarshal(*v, &ioTSecurityAggregatedRecommendationProperties)
if err != nil {
return err
}
- jnap.JitNetworkAccessPolicyProperties = &jitNetworkAccessPolicyProperties
+ itsar.IoTSecurityAggregatedRecommendationProperties = &ioTSecurityAggregatedRecommendationProperties
}
}
}
@@ -4455,56 +4672,910 @@ func (jnap *JitNetworkAccessPolicy) UnmarshalJSON(body []byte) error {
return nil
}
-// JitNetworkAccessPolicyInitiatePort ...
-type JitNetworkAccessPolicyInitiatePort struct {
- Number *int32 `json:"number,omitempty"`
- // AllowedSourceAddressPrefix - Source of the allowed traffic. If omitted, the request will be for the source IP address of the initiate request.
- AllowedSourceAddressPrefix *string `json:"allowedSourceAddressPrefix,omitempty"`
- // EndTimeUtc - The time to close the request in UTC
- EndTimeUtc *date.Time `json:"endTimeUtc,omitempty"`
+// IoTSecurityAggregatedRecommendationList list of IoT aggregated security recommendations
+type IoTSecurityAggregatedRecommendationList struct {
+ autorest.Response `json:"-"`
+	// Value - List of aggregated recommendations data
+ Value *[]IoTSecurityAggregatedRecommendation `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URI to fetch the next page.
+ NextLink *string `json:"nextLink,omitempty"`
}
-// JitNetworkAccessPolicyInitiateRequest ...
-type JitNetworkAccessPolicyInitiateRequest struct {
- // VirtualMachines - A list of virtual machines & ports to open access for
- VirtualMachines *[]JitNetworkAccessPolicyInitiateVirtualMachine `json:"virtualMachines,omitempty"`
+// IoTSecurityAggregatedRecommendationListIterator provides access to a complete listing of
+// IoTSecurityAggregatedRecommendation values.
+type IoTSecurityAggregatedRecommendationListIterator struct {
+ i int
+ page IoTSecurityAggregatedRecommendationListPage
}
-// JitNetworkAccessPolicyInitiateVirtualMachine ...
-type JitNetworkAccessPolicyInitiateVirtualMachine struct {
- // ID - Resource ID of the virtual machine that is linked to this policy
- ID *string `json:"id,omitempty"`
- // Ports - The ports to open for the resource with the `id`
- Ports *[]JitNetworkAccessPolicyInitiatePort `json:"ports,omitempty"`
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *IoTSecurityAggregatedRecommendationListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecurityAggregatedRecommendationListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
}
-// JitNetworkAccessPolicyProperties ...
-type JitNetworkAccessPolicyProperties struct {
- // VirtualMachines - Configurations for Microsoft.Compute/virtualMachines resource type.
- VirtualMachines *[]JitNetworkAccessPolicyVirtualMachine `json:"virtualMachines,omitempty"`
- Requests *[]JitNetworkAccessRequest `json:"requests,omitempty"`
- // ProvisioningState - READ-ONLY; Gets the provisioning state of the Just-in-Time policy.
- ProvisioningState *string `json:"provisioningState,omitempty"`
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *IoTSecurityAggregatedRecommendationListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
}
-// JitNetworkAccessPolicyVirtualMachine ...
-type JitNetworkAccessPolicyVirtualMachine struct {
- // ID - Resource ID of the virtual machine that is linked to this policy
- ID *string `json:"id,omitempty"`
- // Ports - Port configurations for the virtual machine
- Ports *[]JitNetworkAccessPortRule `json:"ports,omitempty"`
- // PublicIPAddress - Public IP address of the Azure Firewall that is linked to this policy, if applicable
- PublicIPAddress *string `json:"publicIpAddress,omitempty"`
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter IoTSecurityAggregatedRecommendationListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
-// JitNetworkAccessPortRule ...
-type JitNetworkAccessPortRule struct {
- Number *int32 `json:"number,omitempty"`
- // Protocol - Possible values include: 'TCP', 'UDP', 'All'
- Protocol Protocol `json:"protocol,omitempty"`
- // AllowedSourceAddressPrefix - Mutually exclusive with the "allowedSourceAddressPrefixes" parameter. Should be an IP address or CIDR, for example "192.168.0.3" or "192.168.0.0/16".
- AllowedSourceAddressPrefix *string `json:"allowedSourceAddressPrefix,omitempty"`
- // AllowedSourceAddressPrefixes - Mutually exclusive with the "allowedSourceAddressPrefix" parameter.
+// Response returns the raw server response from the last page request.
+func (iter IoTSecurityAggregatedRecommendationListIterator) Response() IoTSecurityAggregatedRecommendationList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter IoTSecurityAggregatedRecommendationListIterator) Value() IoTSecurityAggregatedRecommendation {
+ if !iter.page.NotDone() {
+ return IoTSecurityAggregatedRecommendation{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the IoTSecurityAggregatedRecommendationListIterator type.
+func NewIoTSecurityAggregatedRecommendationListIterator(page IoTSecurityAggregatedRecommendationListPage) IoTSecurityAggregatedRecommendationListIterator {
+ return IoTSecurityAggregatedRecommendationListIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (itsarl IoTSecurityAggregatedRecommendationList) IsEmpty() bool {
+ return itsarl.Value == nil || len(*itsarl.Value) == 0
+}
+
+// ioTSecurityAggregatedRecommendationListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (itsarl IoTSecurityAggregatedRecommendationList) ioTSecurityAggregatedRecommendationListPreparer(ctx context.Context) (*http.Request, error) {
+ if itsarl.NextLink == nil || len(to.String(itsarl.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(itsarl.NextLink)))
+}
+
+// IoTSecurityAggregatedRecommendationListPage contains a page of IoTSecurityAggregatedRecommendation
+// values.
+type IoTSecurityAggregatedRecommendationListPage struct {
+ fn func(context.Context, IoTSecurityAggregatedRecommendationList) (IoTSecurityAggregatedRecommendationList, error)
+ itsarl IoTSecurityAggregatedRecommendationList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *IoTSecurityAggregatedRecommendationListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecurityAggregatedRecommendationListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.itsarl)
+ if err != nil {
+ return err
+ }
+ page.itsarl = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *IoTSecurityAggregatedRecommendationListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page IoTSecurityAggregatedRecommendationListPage) NotDone() bool {
+ return !page.itsarl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page IoTSecurityAggregatedRecommendationListPage) Response() IoTSecurityAggregatedRecommendationList {
+ return page.itsarl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page IoTSecurityAggregatedRecommendationListPage) Values() []IoTSecurityAggregatedRecommendation {
+ if page.itsarl.IsEmpty() {
+ return nil
+ }
+ return *page.itsarl.Value
+}
+
+// Creates a new instance of the IoTSecurityAggregatedRecommendationListPage type.
+func NewIoTSecurityAggregatedRecommendationListPage(getNextPage func(context.Context, IoTSecurityAggregatedRecommendationList) (IoTSecurityAggregatedRecommendationList, error)) IoTSecurityAggregatedRecommendationListPage {
+ return IoTSecurityAggregatedRecommendationListPage{fn: getNextPage}
+}
+
+// IoTSecurityAggregatedRecommendationProperties security Solution Recommendation Information
+type IoTSecurityAggregatedRecommendationProperties struct {
+ // RecommendationName - Name of the recommendation
+ RecommendationName *string `json:"recommendationName,omitempty"`
+ // RecommendationDisplayName - READ-ONLY; Display name of the recommendation type.
+ RecommendationDisplayName *string `json:"recommendationDisplayName,omitempty"`
+	// Description - READ-ONLY; Description of the recommendation and what it means
+ Description *string `json:"description,omitempty"`
+ // RecommendationTypeID - READ-ONLY; The recommendation-type GUID.
+ RecommendationTypeID *string `json:"recommendationTypeId,omitempty"`
+ // DetectedBy - READ-ONLY; Name of the vendor that discovered the issue
+ DetectedBy *string `json:"detectedBy,omitempty"`
+ // RemediationSteps - READ-ONLY; Recommended steps for remediation
+ RemediationSteps *string `json:"remediationSteps,omitempty"`
+ // ReportedSeverity - READ-ONLY; Estimated severity of this recommendation. Possible values include: 'Informational', 'Low', 'Medium', 'High'
+ ReportedSeverity ReportedSeverity `json:"reportedSeverity,omitempty"`
+	// HealthyDevices - READ-ONLY; the number of healthy devices within the solution
+ HealthyDevices *int32 `json:"healthyDevices,omitempty"`
+	// UnhealthyDeviceCount - READ-ONLY; the number of unhealthy devices within the solution
+ UnhealthyDeviceCount *int32 `json:"unhealthyDeviceCount,omitempty"`
+ // LogAnalyticsQuery - READ-ONLY; query in log analytics to get the list of affected devices/alerts
+ LogAnalyticsQuery *string `json:"logAnalyticsQuery,omitempty"`
+}
+
+// IoTSecurityAlertedDevice statistic information about the number of alerts per device during the last
+// period
+type IoTSecurityAlertedDevice struct {
+	// DeviceID - READ-ONLY; Identifier of the alerted device
+ DeviceID *string `json:"deviceId,omitempty"`
+ // AlertsCount - READ-ONLY; the number of alerts raised for this device
+ AlertsCount *int32 `json:"alertsCount,omitempty"`
+}
+
+// IoTSecurityAlertedDevicesList list of devices with the count of raised alerts
+type IoTSecurityAlertedDevicesList struct {
+	// Value - List of devices with the count of raised alerts
+ Value *[]IoTSecurityAlertedDevice `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URI to fetch the next page.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// IoTSecurityDeviceAlert statistic information about the number of alerts per alert type during the last
+// period
+type IoTSecurityDeviceAlert struct {
+ // AlertDisplayName - READ-ONLY; Display name of the alert
+ AlertDisplayName *string `json:"alertDisplayName,omitempty"`
+ // ReportedSeverity - READ-ONLY; Estimated severity of this alert. Possible values include: 'Informational', 'Low', 'Medium', 'High'
+ ReportedSeverity ReportedSeverity `json:"reportedSeverity,omitempty"`
+ // AlertsCount - READ-ONLY; the number of alerts raised for this alert type
+ AlertsCount *int32 `json:"alertsCount,omitempty"`
+}
+
+// IoTSecurityDeviceAlertsList list of alerts with the count of raised alerts
+type IoTSecurityDeviceAlertsList struct {
+ // Value - List of top alerts data
+ Value *[]IoTSecurityDeviceAlert `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URI to fetch the next page.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// IoTSecurityDeviceRecommendation statistic information about the number of recommendations per
+// recommendation type
+type IoTSecurityDeviceRecommendation struct {
+ // RecommendationDisplayName - READ-ONLY; Display name of the recommendation
+ RecommendationDisplayName *string `json:"recommendationDisplayName,omitempty"`
+ // ReportedSeverity - READ-ONLY; Estimated severity of this recommendation. Possible values include: 'Informational', 'Low', 'Medium', 'High'
+ ReportedSeverity ReportedSeverity `json:"reportedSeverity,omitempty"`
+	// DevicesCount - READ-ONLY; the number of devices with this recommendation
+ DevicesCount *int32 `json:"devicesCount,omitempty"`
+}
+
+// IoTSecurityDeviceRecommendationsList list of recommendations with the count of devices
+type IoTSecurityDeviceRecommendationsList struct {
+ // Value - List of aggregated recommendation data
+ Value *[]IoTSecurityDeviceRecommendation `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URI to fetch the next page.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// IoTSecuritySolutionAnalyticsModel security Analytics of a security solution
+type IoTSecuritySolutionAnalyticsModel struct {
+ autorest.Response `json:"-"`
+	// IoTSecuritySolutionAnalyticsModelProperties - Security analytics data of the security solution
+ *IoTSecuritySolutionAnalyticsModelProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for IoTSecuritySolutionAnalyticsModel.
+func (itssam IoTSecuritySolutionAnalyticsModel) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if itssam.IoTSecuritySolutionAnalyticsModelProperties != nil {
+ objectMap["properties"] = itssam.IoTSecuritySolutionAnalyticsModelProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for IoTSecuritySolutionAnalyticsModel struct.
+func (itssam *IoTSecuritySolutionAnalyticsModel) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var ioTSecuritySolutionAnalyticsModelProperties IoTSecuritySolutionAnalyticsModelProperties
+ err = json.Unmarshal(*v, &ioTSecuritySolutionAnalyticsModelProperties)
+ if err != nil {
+ return err
+ }
+ itssam.IoTSecuritySolutionAnalyticsModelProperties = &ioTSecuritySolutionAnalyticsModelProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ itssam.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ itssam.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ itssam.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// IoTSecuritySolutionAnalyticsModelList list of Security Analytics of a security solution
+type IoTSecuritySolutionAnalyticsModelList struct {
+ autorest.Response `json:"-"`
+ // Value - List of Security Analytics of a security solution
+ Value *[]IoTSecuritySolutionAnalyticsModel `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URI to fetch the next page.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// IoTSecuritySolutionAnalyticsModelProperties security Analytics of a security solution properties
+type IoTSecuritySolutionAnalyticsModelProperties struct {
+ // Metrics - READ-ONLY; Security Analytics of a security solution
+ Metrics *IoTSeverityMetrics `json:"metrics,omitempty"`
+ // UnhealthyDeviceCount - READ-ONLY; number of unhealthy devices
+ UnhealthyDeviceCount *int32 `json:"unhealthyDeviceCount,omitempty"`
+	// DevicesMetrics - READ-ONLY; The list of device metrics by aggregated date.
+ DevicesMetrics *[]IoTSecuritySolutionAnalyticsModelPropertiesDevicesMetricsItem `json:"devicesMetrics,omitempty"`
+	// TopAlertedDevices - The list of the top 3 most attacked devices.
+ TopAlertedDevices *IoTSecurityAlertedDevicesList `json:"topAlertedDevices,omitempty"`
+	// MostPrevalentDeviceAlerts - The list of the 3 most prevalent alerts.
+ MostPrevalentDeviceAlerts *IoTSecurityDeviceAlertsList `json:"mostPrevalentDeviceAlerts,omitempty"`
+	// MostPrevalentDeviceRecommendations - The list of the 3 most prevalent recommendations.
+ MostPrevalentDeviceRecommendations *IoTSecurityDeviceRecommendationsList `json:"mostPrevalentDeviceRecommendations,omitempty"`
+}
+
+// IoTSecuritySolutionAnalyticsModelPropertiesDevicesMetricsItem ...
+type IoTSecuritySolutionAnalyticsModelPropertiesDevicesMetricsItem struct {
+ // Date - the date of the metrics
+ Date *date.Time `json:"date,omitempty"`
+ // DevicesMetrics - devices alerts count by severity.
+ DevicesMetrics *IoTSeverityMetrics `json:"devicesMetrics,omitempty"`
+}
+
+// IoTSecuritySolutionModel security Solution
+type IoTSecuritySolutionModel struct {
+ autorest.Response `json:"-"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+ // Location - The resource location.
+ Location *string `json:"location,omitempty"`
+ // IoTSecuritySolutionProperties - Security Solution data
+ *IoTSecuritySolutionProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for IoTSecuritySolutionModel.
+func (itssm IoTSecuritySolutionModel) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if itssm.Tags != nil {
+ objectMap["tags"] = itssm.Tags
+ }
+ if itssm.Location != nil {
+ objectMap["location"] = itssm.Location
+ }
+ if itssm.IoTSecuritySolutionProperties != nil {
+ objectMap["properties"] = itssm.IoTSecuritySolutionProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for IoTSecuritySolutionModel struct.
+func (itssm *IoTSecuritySolutionModel) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ itssm.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ itssm.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ itssm.Type = &typeVar
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ itssm.Tags = tags
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ itssm.Location = &location
+ }
+ case "properties":
+ if v != nil {
+ var ioTSecuritySolutionProperties IoTSecuritySolutionProperties
+ err = json.Unmarshal(*v, &ioTSecuritySolutionProperties)
+ if err != nil {
+ return err
+ }
+ itssm.IoTSecuritySolutionProperties = &ioTSecuritySolutionProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// IoTSecuritySolutionProperties security Solution setting data
+type IoTSecuritySolutionProperties struct {
+ // Workspace - Workspace resource ID
+ Workspace *string `json:"workspace,omitempty"`
+ // DisplayName - Resource display name.
+ DisplayName *string `json:"displayName,omitempty"`
+ // Status - Security solution status. Possible values include: 'SolutionStatusEnabled', 'SolutionStatusDisabled'
+ Status SolutionStatus `json:"status,omitempty"`
+ // Export - List of additional export to workspace data options
+ Export *[]ExportData `json:"export,omitempty"`
+ // DisabledDataSources - Disabled data sources. Disabling these data sources compromises the system.
+ DisabledDataSources *[]DataSource `json:"disabledDataSources,omitempty"`
+ // IotHubs - IoT Hub resource IDs
+ IotHubs *[]string `json:"iotHubs,omitempty"`
+ UserDefinedResources *UserDefinedResourcesProperties `json:"userDefinedResources,omitempty"`
+ // AutoDiscoveredResources - READ-ONLY; List of resources that were automatically discovered as relevant to the security solution.
+ AutoDiscoveredResources *[]string `json:"autoDiscoveredResources,omitempty"`
+ RecommendationsConfiguration *[]RecommendationConfigurationProperties `json:"recommendationsConfiguration,omitempty"`
+}
+
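Putting the new enums and `RecommendationConfigurationProperties` together, a create-or-update payload for a solution would presumably be assembled as sketched below; the workspace ID, IoT Hub ID, location, and tag values are placeholders, and `to.StringPtr` comes from `github.com/Azure/go-autorest/autorest/to`.

```go
package sample

import (
	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"
	"github.com/Azure/go-autorest/autorest/to"
)

// newSolutionModel builds an IoTSecuritySolutionModel using only types added
// in this change; the caller supplies the Log Analytics workspace and IoT Hub
// resource IDs.
func newSolutionModel(workspaceID, hubID string) security.IoTSecuritySolutionModel {
	return security.IoTSecuritySolutionModel{
		Location: to.StringPtr("eastus"),
		Tags:     map[string]*string{"env": to.StringPtr("demo")},
		IoTSecuritySolutionProperties: &security.IoTSecuritySolutionProperties{
			Workspace:   to.StringPtr(workspaceID),
			DisplayName: to.StringPtr("my-iot-solution"),
			Status:      security.SolutionStatusEnabled,
			Export:      &[]security.ExportData{security.RawEvents},
			IotHubs:     &[]string{hubID},
			RecommendationsConfiguration: &[]security.RecommendationConfigurationProperties{
				{RecommendationType: security.IoTOpenPorts, Status: security.Enabled},
			},
		},
	}
}
```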
+// IoTSecuritySolutionsList list of IoT security solutions
+type IoTSecuritySolutionsList struct {
+ autorest.Response `json:"-"`
+ // Value - List of security solutions
+ Value *[]IoTSecuritySolutionModel `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URI to fetch the next page.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// IoTSecuritySolutionsListIterator provides access to a complete listing of IoTSecuritySolutionModel
+// values.
+type IoTSecuritySolutionsListIterator struct {
+ i int
+ page IoTSecuritySolutionsListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *IoTSecuritySolutionsListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *IoTSecuritySolutionsListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter IoTSecuritySolutionsListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter IoTSecuritySolutionsListIterator) Response() IoTSecuritySolutionsList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter IoTSecuritySolutionsListIterator) Value() IoTSecuritySolutionModel {
+ if !iter.page.NotDone() {
+ return IoTSecuritySolutionModel{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the IoTSecuritySolutionsListIterator type.
+func NewIoTSecuritySolutionsListIterator(page IoTSecuritySolutionsListPage) IoTSecuritySolutionsListIterator {
+ return IoTSecuritySolutionsListIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (itssl IoTSecuritySolutionsList) IsEmpty() bool {
+ return itssl.Value == nil || len(*itssl.Value) == 0
+}
+
+// ioTSecuritySolutionsListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (itssl IoTSecuritySolutionsList) ioTSecuritySolutionsListPreparer(ctx context.Context) (*http.Request, error) {
+ if itssl.NextLink == nil || len(to.String(itssl.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(itssl.NextLink)))
+}
+
+// IoTSecuritySolutionsListPage contains a page of IoTSecuritySolutionModel values.
+type IoTSecuritySolutionsListPage struct {
+ fn func(context.Context, IoTSecuritySolutionsList) (IoTSecuritySolutionsList, error)
+ itssl IoTSecuritySolutionsList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *IoTSecuritySolutionsListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/IoTSecuritySolutionsListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.itssl)
+ if err != nil {
+ return err
+ }
+ page.itssl = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *IoTSecuritySolutionsListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page IoTSecuritySolutionsListPage) NotDone() bool {
+ return !page.itssl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page IoTSecuritySolutionsListPage) Response() IoTSecuritySolutionsList {
+ return page.itssl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page IoTSecuritySolutionsListPage) Values() []IoTSecuritySolutionModel {
+ if page.itssl.IsEmpty() {
+ return nil
+ }
+ return *page.itssl.Value
+}
+
+// Creates a new instance of the IoTSecuritySolutionsListPage type.
+func NewIoTSecuritySolutionsListPage(getNextPage func(context.Context, IoTSecuritySolutionsList) (IoTSecuritySolutionsList, error)) IoTSecuritySolutionsListPage {
+ return IoTSecuritySolutionsListPage{fn: getNextPage}
+}
+
+// IoTSeverityMetrics severity metrics
+type IoTSeverityMetrics struct {
+ // High - count of high severity items
+ High *int32 `json:"high,omitempty"`
+ // Medium - count of medium severity items
+ Medium *int32 `json:"medium,omitempty"`
+ // Low - count of low severity items
+ Low *int32 `json:"low,omitempty"`
+}
+
+// JitNetworkAccessPoliciesList ...
+type JitNetworkAccessPoliciesList struct {
+ autorest.Response `json:"-"`
+ Value *[]JitNetworkAccessPolicy `json:"value,omitempty"`
+ // NextLink - READ-ONLY; The URI to fetch the next page.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// JitNetworkAccessPoliciesListIterator provides access to a complete listing of JitNetworkAccessPolicy
+// values.
+type JitNetworkAccessPoliciesListIterator struct {
+ i int
+ page JitNetworkAccessPoliciesListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *JitNetworkAccessPoliciesListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JitNetworkAccessPoliciesListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *JitNetworkAccessPoliciesListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter JitNetworkAccessPoliciesListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter JitNetworkAccessPoliciesListIterator) Response() JitNetworkAccessPoliciesList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter JitNetworkAccessPoliciesListIterator) Value() JitNetworkAccessPolicy {
+ if !iter.page.NotDone() {
+ return JitNetworkAccessPolicy{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the JitNetworkAccessPoliciesListIterator type.
+func NewJitNetworkAccessPoliciesListIterator(page JitNetworkAccessPoliciesListPage) JitNetworkAccessPoliciesListIterator {
+ return JitNetworkAccessPoliciesListIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (jnapl JitNetworkAccessPoliciesList) IsEmpty() bool {
+ return jnapl.Value == nil || len(*jnapl.Value) == 0
+}
+
+// jitNetworkAccessPoliciesListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (jnapl JitNetworkAccessPoliciesList) jitNetworkAccessPoliciesListPreparer(ctx context.Context) (*http.Request, error) {
+ if jnapl.NextLink == nil || len(to.String(jnapl.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(jnapl.NextLink)))
+}
+
+// JitNetworkAccessPoliciesListPage contains a page of JitNetworkAccessPolicy values.
+type JitNetworkAccessPoliciesListPage struct {
+ fn func(context.Context, JitNetworkAccessPoliciesList) (JitNetworkAccessPoliciesList, error)
+ jnapl JitNetworkAccessPoliciesList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *JitNetworkAccessPoliciesListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/JitNetworkAccessPoliciesListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.jnapl)
+ if err != nil {
+ return err
+ }
+ page.jnapl = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *JitNetworkAccessPoliciesListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page JitNetworkAccessPoliciesListPage) NotDone() bool {
+ return !page.jnapl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page JitNetworkAccessPoliciesListPage) Response() JitNetworkAccessPoliciesList {
+ return page.jnapl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page JitNetworkAccessPoliciesListPage) Values() []JitNetworkAccessPolicy {
+ if page.jnapl.IsEmpty() {
+ return nil
+ }
+ return *page.jnapl.Value
+}
+
+// Creates a new instance of the JitNetworkAccessPoliciesListPage type.
+func NewJitNetworkAccessPoliciesListPage(getNextPage func(context.Context, JitNetworkAccessPoliciesList) (JitNetworkAccessPoliciesList, error)) JitNetworkAccessPoliciesListPage {
+ return JitNetworkAccessPoliciesListPage{fn: getNextPage}
+}
+
+// JitNetworkAccessPolicy ...
+type JitNetworkAccessPolicy struct {
+ autorest.Response `json:"-"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+ // Kind - Kind of the resource
+ Kind *string `json:"kind,omitempty"`
+ // Location - READ-ONLY; Location where the resource is stored
+ Location *string `json:"location,omitempty"`
+ *JitNetworkAccessPolicyProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for JitNetworkAccessPolicy.
+func (jnap JitNetworkAccessPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if jnap.Kind != nil {
+ objectMap["kind"] = jnap.Kind
+ }
+ if jnap.JitNetworkAccessPolicyProperties != nil {
+ objectMap["properties"] = jnap.JitNetworkAccessPolicyProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for JitNetworkAccessPolicy struct.
+func (jnap *JitNetworkAccessPolicy) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ jnap.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ jnap.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ jnap.Type = &typeVar
+ }
+ case "kind":
+ if v != nil {
+ var kind string
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ jnap.Kind = &kind
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ jnap.Location = &location
+ }
+ case "properties":
+ if v != nil {
+ var jitNetworkAccessPolicyProperties JitNetworkAccessPolicyProperties
+ err = json.Unmarshal(*v, &jitNetworkAccessPolicyProperties)
+ if err != nil {
+ return err
+ }
+ jnap.JitNetworkAccessPolicyProperties = &jitNetworkAccessPolicyProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// JitNetworkAccessPolicyInitiatePort ...
+type JitNetworkAccessPolicyInitiatePort struct {
+ Number *int32 `json:"number,omitempty"`
+ // AllowedSourceAddressPrefix - Source of the allowed traffic. If omitted, the request will be for the source IP address of the initiate request.
+ AllowedSourceAddressPrefix *string `json:"allowedSourceAddressPrefix,omitempty"`
+ // EndTimeUtc - The time to close the request in UTC
+ EndTimeUtc *date.Time `json:"endTimeUtc,omitempty"`
+}
+
+// JitNetworkAccessPolicyInitiateRequest ...
+type JitNetworkAccessPolicyInitiateRequest struct {
+ // VirtualMachines - A list of virtual machines & ports to open access for
+ VirtualMachines *[]JitNetworkAccessPolicyInitiateVirtualMachine `json:"virtualMachines,omitempty"`
+}
+
+// JitNetworkAccessPolicyInitiateVirtualMachine ...
+type JitNetworkAccessPolicyInitiateVirtualMachine struct {
+ // ID - Resource ID of the virtual machine that is linked to this policy
+ ID *string `json:"id,omitempty"`
+ // Ports - The ports to open for the resource with the `id`
+ Ports *[]JitNetworkAccessPolicyInitiatePort `json:"ports,omitempty"`
+}
+
+// JitNetworkAccessPolicyProperties ...
+type JitNetworkAccessPolicyProperties struct {
+ // VirtualMachines - Configurations for Microsoft.Compute/virtualMachines resource type.
+ VirtualMachines *[]JitNetworkAccessPolicyVirtualMachine `json:"virtualMachines,omitempty"`
+ Requests *[]JitNetworkAccessRequest `json:"requests,omitempty"`
+ // ProvisioningState - READ-ONLY; Gets the provisioning state of the Just-in-Time policy.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// JitNetworkAccessPolicyVirtualMachine ...
+type JitNetworkAccessPolicyVirtualMachine struct {
+ // ID - Resource ID of the virtual machine that is linked to this policy
+ ID *string `json:"id,omitempty"`
+ // Ports - Port configurations for the virtual machine
+ Ports *[]JitNetworkAccessPortRule `json:"ports,omitempty"`
+ // PublicIPAddress - Public IP address of the Azure Firewall that is linked to this policy, if applicable
+ PublicIPAddress *string `json:"publicIpAddress,omitempty"`
+}
+
+// JitNetworkAccessPortRule ...
+type JitNetworkAccessPortRule struct {
+ Number *int32 `json:"number,omitempty"`
+ // Protocol - Possible values include: 'TCP', 'UDP', 'All'
+ Protocol Protocol `json:"protocol,omitempty"`
+ // AllowedSourceAddressPrefix - Mutually exclusive with the "allowedSourceAddressPrefixes" parameter. Should be an IP address or CIDR, for example "192.168.0.3" or "192.168.0.0/16".
+ AllowedSourceAddressPrefix *string `json:"allowedSourceAddressPrefix,omitempty"`
+ // AllowedSourceAddressPrefixes - Mutually exclusive with the "allowedSourceAddressPrefix" parameter.
AllowedSourceAddressPrefixes *[]string `json:"allowedSourceAddressPrefixes,omitempty"`
// MaxRequestAccessDuration - Maximum duration requests can be made for. In ISO 8601 duration format. Minimum 5 minutes, maximum 1 day
MaxRequestAccessDuration *string `json:"maxRequestAccessDuration,omitempty"`
@@ -4757,7 +5828,9 @@ type PathRecommendation struct {
ConfigurationStatus ConfigurationStatus1 `json:"configurationStatus,omitempty"`
}
-// Pricing pricing tier will be applied for the scope based on the resource ID
+// Pricing azure Security Center is provided in two pricing tiers: free and standard, with the standard
+// tier available with a trial period. The standard tier offers advanced security capabilities, while the
+// free tier offers basic security features.
type Pricing struct {
autorest.Response `json:"-"`
// PricingProperties - Pricing data
@@ -4830,7 +5903,7 @@ func (p *Pricing) UnmarshalJSON(body []byte) error {
return nil
}
-// PricingList list of pricing configurations response
+// PricingList list of pricing configurations response.
type PricingList struct {
autorest.Response `json:"-"`
// Value - List of pricing configurations
@@ -4857,6 +5930,16 @@ type PublisherInfo struct {
Version *string `json:"version,omitempty"`
}
+// RecommendationConfigurationProperties recommendation configuration
+type RecommendationConfigurationProperties struct {
+ // RecommendationType - The recommendation type. Possible values include: 'IoTACRAuthentication', 'IoTAgentSendsUnutilizedMessages', 'IoTBaseline', 'IoTEdgeHubMemOptimize', 'IoTEdgeLoggingOptions', 'IoTInconsistentModuleSettings', 'IoTInstallAgent', 'IoTIPFilterDenyAll', 'IoTIPFilterPermissiveRule', 'IoTOpenPorts', 'IoTPermissiveFirewallPolicy', 'IoTPermissiveInputFirewallRules', 'IoTPermissiveOutputFirewallRules', 'IoTPrivilegedDockerOptions', 'IoTSharedCredentials', 'IoTVulnerableTLSCipherSuite'
+ RecommendationType RecommendationType `json:"recommendationType,omitempty"`
+ // Name - READ-ONLY
+ Name *string `json:"name,omitempty"`
+ // Status - Recommendation status. The recommendation is not generated when the status is disabled. Possible values include: 'Disabled', 'Enabled'
+ Status RecommendationConfigStatus `json:"status,omitempty"`
+}
+
// RegulatoryComplianceAssessment regulatory compliance assessment details and state
type RegulatoryComplianceAssessment struct {
autorest.Response `json:"-"`
@@ -5769,6 +6852,21 @@ func NewSettingsListPage(getNextPage func(context.Context, SettingsList) (Settin
return SettingsListPage{fn: getNextPage}
}
+// TagsResource a container holding only the Tags for a resource, allowing the user to update the tags.
+type TagsResource struct {
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for TagsResource.
+func (tr TagsResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if tr.Tags != nil {
+ objectMap["tags"] = tr.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
// Task security task that we recommend to do in order to strengthen security
type Task struct {
autorest.Response `json:"-"`
@@ -6357,6 +7455,37 @@ type TopologySingleResourceParent struct {
ResourceID *string `json:"resourceId,omitempty"`
}
+// UpdateIotSecuritySolutionData ...
+type UpdateIotSecuritySolutionData struct {
+ UserDefinedResources *UserDefinedResourcesProperties `json:"userDefinedResources,omitempty"`
+ RecommendationsConfiguration *[]RecommendationConfigurationProperties `json:"recommendationsConfiguration,omitempty"`
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for UpdateIotSecuritySolutionData.
+func (uissd UpdateIotSecuritySolutionData) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if uissd.UserDefinedResources != nil {
+ objectMap["userDefinedResources"] = uissd.UserDefinedResources
+ }
+ if uissd.RecommendationsConfiguration != nil {
+ objectMap["recommendationsConfiguration"] = uissd.RecommendationsConfiguration
+ }
+ if uissd.Tags != nil {
+ objectMap["tags"] = uissd.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UserDefinedResourcesProperties properties of the solution's user defined resources.
+type UserDefinedResourcesProperties struct {
+ // Query - Azure Resource Graph query which represents the security solution's user defined resources. Required to start with "where type != "Microsoft.Devices/IotHubs""
+ Query *string `json:"query,omitempty"`
+ // QuerySubscriptions - List of Azure subscription ids on which the user defined resources query should be executed.
+ QuerySubscriptions *[]string `json:"querySubscriptions,omitempty"`
+}
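
Since `UpdateIotSecuritySolutionData` also gets a custom marshaler that skips nil sections, a partial update can carry only the pieces being changed. A minimal sketch, assuming the v2.0 import path used in this hunk and that the enum constants are exported under the names listed above (`IoTOpenPorts`, `Disabled`):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"
)

func strPtr(s string) *string { return &s }

func main() {
	update := security.UpdateIotSecuritySolutionData{
		UserDefinedResources: &security.UserDefinedResourcesProperties{
			// Per the field doc above, the query must start with the IotHubs exclusion.
			Query:              strPtr(`where type != "Microsoft.Devices/IotHubs" | where tags.env == "prod"`),
			QuerySubscriptions: &[]string{"00000000-0000-0000-0000-000000000000"},
		},
		RecommendationsConfiguration: &[]security.RecommendationConfigurationProperties{{
			RecommendationType: security.IoTOpenPorts, // constant name assumed from the enum list above
			Status:             security.Disabled,     // stop generating this recommendation
		}},
		Tags: map[string]*string{"env": strPtr("prod")},
	}

	body, _ := json.Marshal(update) // the custom marshaler omits any nil section
	fmt.Println(string(body))       // only the populated sections are sent
}
```
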
+
// UserRecommendation represents a user that is recommended to be allowed for a certain rule
type UserRecommendation struct {
// Username - Represents a user that is recommended to be allowed for a certain rule
diff --git a/services/preview/security/mgmt/v2.0/security/pricings.go b/services/preview/security/mgmt/v2.0/security/pricings.go
index 07566049a986..d43a4ac71fb5 100644
--- a/services/preview/security/mgmt/v2.0/security/pricings.go
+++ b/services/preview/security/mgmt/v2.0/security/pricings.go
@@ -41,9 +41,7 @@ func NewPricingsClientWithBaseURI(baseURI string, subscriptionID string, ascLoca
return PricingsClient{NewWithBaseURI(baseURI, subscriptionID, ascLocation)}
}
-// Get a given security pricing configuration in the subscription. Azure Security Center is available in two pricing
-// tiers: Free and Standard, on multiple resource types, including Virtual machines, SQL Servers, App service plans and
-// Storage accounts.
+// Get gets a provided Security Center pricing configuration in the subscription.
// Parameters:
// pricingName - name of the pricing configuration
func (client PricingsClient) Get(ctx context.Context, pricingName string) (result Pricing, err error) {
@@ -124,9 +122,7 @@ func (client PricingsClient) GetResponder(resp *http.Response) (result Pricing,
return
}
-// List a given security pricing configuration in the subscription. Azure Security Center is available in two pricing
-// tiers: Free and Standard, on multiple resource types, including Virtual machines, SQL Servers, App service plans and
-// Storage accounts.
+// List lists Security Center pricing configurations in the subscription.
func (client PricingsClient) List(ctx context.Context) (result PricingList, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PricingsClient.List")
@@ -204,9 +200,7 @@ func (client PricingsClient) ListResponder(resp *http.Response) (result PricingL
return
}
-// Update a given security pricing configuration in the subscription. Azure Security Center is available in two pricing
-// tiers: Free and Standard, on multiple resource types, including Virtual machines, SQL Servers, App service plans and
-// Storage accounts.
+// Update updates a provided Security Center pricing configuration in the subscription.
// Parameters:
// pricingName - name of the pricing configuration
// pricing - pricing object
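
Only the doc comments on `PricingsClient` change here; the method signatures stay as before. A usage sketch for the reworded `Get` and `List`, under a few assumptions not visible in this hunk: the package exposes the usual `DefaultBaseURI` constant, `PricingList.Value` is a `*[]Pricing`, credentials come from `auth.NewAuthorizerFromEnvironment`, and `"westeurope"` stands in for the ASC location.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	client := security.NewPricingsClientWithBaseURI(security.DefaultBaseURI, "<subscription-id>", "westeurope")

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()

	// List returns every pricing configuration in the subscription.
	list, err := client.List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if list.Value != nil {
		fmt.Printf("found %d pricing configurations\n", len(*list.Value))
	}

	// Get fetches a single configuration by name, e.g. "VirtualMachines".
	if _, err := client.Get(ctx, "VirtualMachines"); err != nil {
		log.Fatal(err)
	}
}
```
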
diff --git a/services/preview/security/mgmt/v2.0/security/securityapi/interfaces.go b/services/preview/security/mgmt/v2.0/security/securityapi/interfaces.go
index 357335dd3a70..dde4288d926e 100644
--- a/services/preview/security/mgmt/v2.0/security/securityapi/interfaces.go
+++ b/services/preview/security/mgmt/v2.0/security/securityapi/interfaces.go
@@ -131,6 +131,67 @@ type InformationProtectionPoliciesClientAPI interface {
var _ InformationProtectionPoliciesClientAPI = (*security.InformationProtectionPoliciesClient)(nil)
+// IoTSecuritySolutionsClientAPI contains the set of methods on the IoTSecuritySolutionsClient type.
+type IoTSecuritySolutionsClientAPI interface {
+ List(ctx context.Context, filter string) (result security.IoTSecuritySolutionsListPage, err error)
+}
+
+var _ IoTSecuritySolutionsClientAPI = (*security.IoTSecuritySolutionsClient)(nil)
+
+// IoTSecuritySolutionsResourceGroupClientAPI contains the set of methods on the IoTSecuritySolutionsResourceGroupClient type.
+type IoTSecuritySolutionsResourceGroupClientAPI interface {
+ List(ctx context.Context, resourceGroupName string, filter string) (result security.IoTSecuritySolutionsListPage, err error)
+}
+
+var _ IoTSecuritySolutionsResourceGroupClientAPI = (*security.IoTSecuritySolutionsResourceGroupClient)(nil)
+
+// IotSecuritySolutionClientAPI contains the set of methods on the IotSecuritySolutionClient type.
+type IotSecuritySolutionClientAPI interface {
+ Create(ctx context.Context, resourceGroupName string, solutionName string, iotSecuritySolutionData security.IoTSecuritySolutionModel) (result security.IoTSecuritySolutionModel, err error)
+ Delete(ctx context.Context, resourceGroupName string, solutionName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, solutionName string) (result security.IoTSecuritySolutionModel, err error)
+ Update(ctx context.Context, resourceGroupName string, solutionName string, updateIotSecuritySolutionData security.UpdateIotSecuritySolutionData) (result security.IoTSecuritySolutionModel, err error)
+}
+
+var _ IotSecuritySolutionClientAPI = (*security.IotSecuritySolutionClient)(nil)
+
+// IoTSecuritySolutionsAnalyticsClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsClient type.
+type IoTSecuritySolutionsAnalyticsClientAPI interface {
+ GetAll(ctx context.Context, resourceGroupName string, solutionName string) (result security.IoTSecuritySolutionAnalyticsModelList, err error)
+ GetDefault(ctx context.Context, resourceGroupName string, solutionName string) (result security.IoTSecuritySolutionAnalyticsModel, err error)
+}
+
+var _ IoTSecuritySolutionsAnalyticsClientAPI = (*security.IoTSecuritySolutionsAnalyticsClient)(nil)
+
+// IoTSecuritySolutionsAnalyticsAggregatedAlertsClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsAggregatedAlertsClient type.
+type IoTSecuritySolutionsAnalyticsAggregatedAlertsClientAPI interface {
+ List(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (result security.IoTSecurityAggregatedAlertListPage, err error)
+}
+
+var _ IoTSecuritySolutionsAnalyticsAggregatedAlertsClientAPI = (*security.IoTSecuritySolutionsAnalyticsAggregatedAlertsClient)(nil)
+
+// IoTSecuritySolutionsAnalyticsAggregatedAlertClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsAggregatedAlertClient type.
+type IoTSecuritySolutionsAnalyticsAggregatedAlertClientAPI interface {
+ Dismiss(ctx context.Context, resourceGroupName string, solutionName string, aggregatedAlertName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, solutionName string, aggregatedAlertName string) (result security.IoTSecurityAggregatedAlert, err error)
+}
+
+var _ IoTSecuritySolutionsAnalyticsAggregatedAlertClientAPI = (*security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient)(nil)
+
+// IoTSecuritySolutionsAnalyticsRecommendationClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsRecommendationClient type.
+type IoTSecuritySolutionsAnalyticsRecommendationClientAPI interface {
+ Get(ctx context.Context, resourceGroupName string, solutionName string, aggregatedRecommendationName string) (result security.IoTSecurityAggregatedRecommendation, err error)
+}
+
+var _ IoTSecuritySolutionsAnalyticsRecommendationClientAPI = (*security.IoTSecuritySolutionsAnalyticsRecommendationClient)(nil)
+
+// IoTSecuritySolutionsAnalyticsRecommendationsClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsRecommendationsClient type.
+type IoTSecuritySolutionsAnalyticsRecommendationsClientAPI interface {
+ List(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (result security.IoTSecurityAggregatedRecommendationListPage, err error)
+}
+
+var _ IoTSecuritySolutionsAnalyticsRecommendationsClientAPI = (*security.IoTSecuritySolutionsAnalyticsRecommendationsClient)(nil)
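
One practical consequence of surfacing these clients through `securityapi` interfaces is testability: code can depend on the interface and accept either the real client or a fake. An illustrative helper (not part of the SDK) built only from the signatures added above:

```go
package solutions

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security"
	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v2.0/security/securityapi"
)

// ensureSolution creates (or overwrites) an IoT security solution through the
// IotSecuritySolutionClientAPI interface rather than the concrete client, so a
// test double satisfying the interface can stand in during unit tests.
func ensureSolution(ctx context.Context, c securityapi.IotSecuritySolutionClientAPI,
	resourceGroup, name string, data security.IoTSecuritySolutionModel) (security.IoTSecuritySolutionModel, error) {
	return c.Create(ctx, resourceGroup, name, data)
}
```
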
+
// OperationsClientAPI contains the set of methods on the OperationsClient type.
type OperationsClientAPI interface {
List(ctx context.Context) (result security.OperationListPage, err error)
diff --git a/services/preview/security/mgmt/v3.0/security/iotsecuritysolution.go b/services/preview/security/mgmt/v3.0/security/iotsecuritysolution.go
index 73b1112f75c2..69320fae227d 100644
--- a/services/preview/security/mgmt/v3.0/security/iotsecuritysolution.go
+++ b/services/preview/security/mgmt/v3.0/security/iotsecuritysolution.go
@@ -107,7 +107,7 @@ func (client IotSecuritySolutionClient) CreatePreparer(ctx context.Context, reso
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -200,7 +200,7 @@ func (client IotSecuritySolutionClient) DeletePreparer(ctx context.Context, reso
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -287,7 +287,7 @@ func (client IotSecuritySolutionClient) GetPreparer(ctx context.Context, resourc
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -377,7 +377,7 @@ func (client IotSecuritySolutionClient) UpdatePreparer(ctx context.Context, reso
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutions.go b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutions.go
index 14fbfd273a72..03b256dad4c7 100644
--- a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutions.go
+++ b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutions.go
@@ -89,7 +89,7 @@ func (client IoTSecuritySolutionsClient) ListPreparer(ctx context.Context, filte
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
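
These hunks only bump the internal `APIVersion` constant from 2017-08-01-preview to 2019-08-01, so callers do not change any code. If you want to confirm the new version on the wire, go-autorest's `RequestInspector` hook can log the query parameter. A sketch; the constructor is assumed to take a subscription ID and ASC location like `NewPricingsClientWithBaseURI` above, and credential setup is elided:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/services/preview/security/mgmt/v3.0/security"
	"github.com/Azure/go-autorest/autorest"
)

func main() {
	client := security.NewIoTSecuritySolutionsClient("<subscription-id>", "westeurope")

	// Log the api-version query parameter of every outgoing request.
	client.RequestInspector = func(p autorest.Preparer) autorest.Preparer {
		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
			r, err := p.Prepare(r)
			if err == nil {
				log.Println("api-version =", r.URL.Query().Get("api-version")) // expect 2019-08-01
			}
			return r, err
		})
	}

	// With an Authorizer configured, List now calls the service with api-version=2019-08-01.
	_, _ = client.List(context.Background(), "")
}
```
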
diff --git a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalytics.go b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalytics.go
index d242868be31b..22ce11631743 100644
--- a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalytics.go
+++ b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalytics.go
@@ -97,7 +97,7 @@ func (client IoTSecuritySolutionsAnalyticsClient) GetAllPreparer(ctx context.Con
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -185,7 +185,7 @@ func (client IoTSecuritySolutionsAnalyticsClient) GetDefaultPreparer(ctx context
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsaggregatedalert.go b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsaggregatedalert.go
index 07196e670723..27eddd6c8c14 100644
--- a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsaggregatedalert.go
+++ b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsaggregatedalert.go
@@ -101,7 +101,7 @@ func (client IoTSecuritySolutionsAnalyticsAggregatedAlertClient) DismissPreparer
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -190,7 +190,7 @@ func (client IoTSecuritySolutionsAnalyticsAggregatedAlertClient) GetPreparer(ctx
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsaggregatedalerts.go b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsaggregatedalerts.go
index 0e295e14f36b..1cc08c1c364a 100644
--- a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsaggregatedalerts.go
+++ b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsaggregatedalerts.go
@@ -101,7 +101,7 @@ func (client IoTSecuritySolutionsAnalyticsAggregatedAlertsClient) ListPreparer(c
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsrecommendation.go b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsrecommendation.go
index ff3fad682371..ead902a83498 100644
--- a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsrecommendation.go
+++ b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsrecommendation.go
@@ -101,7 +101,7 @@ func (client IoTSecuritySolutionsAnalyticsRecommendationClient) GetPreparer(ctx
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsrecommendations.go b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsrecommendations.go
index fbda8ca7f69e..0a4201655260 100644
--- a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsrecommendations.go
+++ b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsanalyticsrecommendations.go
@@ -101,7 +101,7 @@ func (client IoTSecuritySolutionsAnalyticsRecommendationsClient) ListPreparer(ct
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsresourcegroup.go b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsresourcegroup.go
index 71faba118a1e..a2214a0bdf94 100644
--- a/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsresourcegroup.go
+++ b/services/preview/security/mgmt/v3.0/security/iotsecuritysolutionsresourcegroup.go
@@ -99,7 +99,7 @@ func (client IoTSecuritySolutionsResourceGroupClient) ListPreparer(ctx context.C
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2017-08-01-preview"
+ const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
diff --git a/services/preview/security/mgmt/v3.0/security/models.go b/services/preview/security/mgmt/v3.0/security/models.go
index d9aa8af052a7..5e32802273f0 100644
--- a/services/preview/security/mgmt/v3.0/security/models.go
+++ b/services/preview/security/mgmt/v3.0/security/models.go
@@ -4529,8 +4529,6 @@ type IoTSecurityAlertedDevice struct {
type IoTSecurityAlertedDevicesList struct {
// Value - List of aggregated alerts data
Value *[]IoTSecurityAlertedDevice `json:"value,omitempty"`
- // NextLink - READ-ONLY; The URI to fetch the next page.
- NextLink *string `json:"nextLink,omitempty"`
}
// IoTSecurityDeviceAlert statistic information about the number of alerts per alert type during the last
@@ -4567,8 +4565,6 @@ type IoTSecurityDeviceRecommendation struct {
type IoTSecurityDeviceRecommendationsList struct {
// Value - List of aggregated recommendation data
Value *[]IoTSecurityDeviceRecommendation `json:"value,omitempty"`
- // NextLink - READ-ONLY; The URI to fetch the next page.
- NextLink *string `json:"nextLink,omitempty"`
}
// IoTSecuritySolutionAnalyticsModel security Analytics of a security solution
@@ -5486,7 +5482,9 @@ type PathRecommendation struct {
ConfigurationStatus ConfigurationStatus1 `json:"configurationStatus,omitempty"`
}
-// Pricing pricing tier will be applied for the scope based on the resource ID
+// Pricing azure Security Center is provided in two pricing tiers: free and standard, with the standard
+// tier available with a trial period. The standard tier offers advanced security capabilities, while the
+// free tier offers basic security features.
type Pricing struct {
autorest.Response `json:"-"`
// PricingProperties - Pricing data
@@ -5559,7 +5557,7 @@ func (p *Pricing) UnmarshalJSON(body []byte) error {
return nil
}
-// PricingList list of pricing configurations response
+// PricingList list of pricing configurations response.
type PricingList struct {
autorest.Response `json:"-"`
// Value - List of pricing configurations
diff --git a/services/preview/security/mgmt/v3.0/security/pricings.go b/services/preview/security/mgmt/v3.0/security/pricings.go
index 07566049a986..d43a4ac71fb5 100644
--- a/services/preview/security/mgmt/v3.0/security/pricings.go
+++ b/services/preview/security/mgmt/v3.0/security/pricings.go
@@ -41,9 +41,7 @@ func NewPricingsClientWithBaseURI(baseURI string, subscriptionID string, ascLoca
return PricingsClient{NewWithBaseURI(baseURI, subscriptionID, ascLocation)}
}
-// Get a given security pricing configuration in the subscription. Azure Security Center is available in two pricing
-// tiers: Free and Standard, on multiple resource types, including Virtual machines, SQL Servers, App service plans and
-// Storage accounts.
+// Get gets a provided Security Center pricing configuration in the subscription.
// Parameters:
// pricingName - name of the pricing configuration
func (client PricingsClient) Get(ctx context.Context, pricingName string) (result Pricing, err error) {
@@ -124,9 +122,7 @@ func (client PricingsClient) GetResponder(resp *http.Response) (result Pricing,
return
}
-// List a given security pricing configuration in the subscription. Azure Security Center is available in two pricing
-// tiers: Free and Standard, on multiple resource types, including Virtual machines, SQL Servers, App service plans and
-// Storage accounts.
+// List lists Security Center pricing configurations in the subscription.
func (client PricingsClient) List(ctx context.Context) (result PricingList, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PricingsClient.List")
@@ -204,9 +200,7 @@ func (client PricingsClient) ListResponder(resp *http.Response) (result PricingL
return
}
-// Update a given security pricing configuration in the subscription. Azure Security Center is available in two pricing
-// tiers: Free and Standard, on multiple resource types, including Virtual machines, SQL Servers, App service plans and
-// Storage accounts.
+// Update updates a provided Security Center pricing configuration in the subscription.
// Parameters:
// pricingName - name of the pricing configuration
// pricing - pricing object
diff --git a/services/preview/security/mgmt/v3.0/security/securityapi/interfaces.go b/services/preview/security/mgmt/v3.0/security/securityapi/interfaces.go
index 573a51be7f5f..c795ece3011d 100644
--- a/services/preview/security/mgmt/v3.0/security/securityapi/interfaces.go
+++ b/services/preview/security/mgmt/v3.0/security/securityapi/interfaces.go
@@ -63,6 +63,67 @@ type SettingsClientAPI interface {
var _ SettingsClientAPI = (*security.SettingsClient)(nil)
+// IoTSecuritySolutionsClientAPI contains the set of methods on the IoTSecuritySolutionsClient type.
+type IoTSecuritySolutionsClientAPI interface {
+ List(ctx context.Context, filter string) (result security.IoTSecuritySolutionsListPage, err error)
+}
+
+var _ IoTSecuritySolutionsClientAPI = (*security.IoTSecuritySolutionsClient)(nil)
+
+// IoTSecuritySolutionsResourceGroupClientAPI contains the set of methods on the IoTSecuritySolutionsResourceGroupClient type.
+type IoTSecuritySolutionsResourceGroupClientAPI interface {
+ List(ctx context.Context, resourceGroupName string, filter string) (result security.IoTSecuritySolutionsListPage, err error)
+}
+
+var _ IoTSecuritySolutionsResourceGroupClientAPI = (*security.IoTSecuritySolutionsResourceGroupClient)(nil)
+
+// IotSecuritySolutionClientAPI contains the set of methods on the IotSecuritySolutionClient type.
+type IotSecuritySolutionClientAPI interface {
+ Create(ctx context.Context, resourceGroupName string, solutionName string, iotSecuritySolutionData security.IoTSecuritySolutionModel) (result security.IoTSecuritySolutionModel, err error)
+ Delete(ctx context.Context, resourceGroupName string, solutionName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, solutionName string) (result security.IoTSecuritySolutionModel, err error)
+ Update(ctx context.Context, resourceGroupName string, solutionName string, updateIotSecuritySolutionData security.UpdateIotSecuritySolutionData) (result security.IoTSecuritySolutionModel, err error)
+}
+
+var _ IotSecuritySolutionClientAPI = (*security.IotSecuritySolutionClient)(nil)
+
+// IoTSecuritySolutionsAnalyticsClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsClient type.
+type IoTSecuritySolutionsAnalyticsClientAPI interface {
+ GetAll(ctx context.Context, resourceGroupName string, solutionName string) (result security.IoTSecuritySolutionAnalyticsModelList, err error)
+ GetDefault(ctx context.Context, resourceGroupName string, solutionName string) (result security.IoTSecuritySolutionAnalyticsModel, err error)
+}
+
+var _ IoTSecuritySolutionsAnalyticsClientAPI = (*security.IoTSecuritySolutionsAnalyticsClient)(nil)
+
+// IoTSecuritySolutionsAnalyticsAggregatedAlertsClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsAggregatedAlertsClient type.
+type IoTSecuritySolutionsAnalyticsAggregatedAlertsClientAPI interface {
+ List(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (result security.IoTSecurityAggregatedAlertListPage, err error)
+}
+
+var _ IoTSecuritySolutionsAnalyticsAggregatedAlertsClientAPI = (*security.IoTSecuritySolutionsAnalyticsAggregatedAlertsClient)(nil)
+
+// IoTSecuritySolutionsAnalyticsAggregatedAlertClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsAggregatedAlertClient type.
+type IoTSecuritySolutionsAnalyticsAggregatedAlertClientAPI interface {
+ Dismiss(ctx context.Context, resourceGroupName string, solutionName string, aggregatedAlertName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, solutionName string, aggregatedAlertName string) (result security.IoTSecurityAggregatedAlert, err error)
+}
+
+var _ IoTSecuritySolutionsAnalyticsAggregatedAlertClientAPI = (*security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient)(nil)
+
+// IoTSecuritySolutionsAnalyticsRecommendationClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsRecommendationClient type.
+type IoTSecuritySolutionsAnalyticsRecommendationClientAPI interface {
+ Get(ctx context.Context, resourceGroupName string, solutionName string, aggregatedRecommendationName string) (result security.IoTSecurityAggregatedRecommendation, err error)
+}
+
+var _ IoTSecuritySolutionsAnalyticsRecommendationClientAPI = (*security.IoTSecuritySolutionsAnalyticsRecommendationClient)(nil)
+
+// IoTSecuritySolutionsAnalyticsRecommendationsClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsRecommendationsClient type.
+type IoTSecuritySolutionsAnalyticsRecommendationsClientAPI interface {
+ List(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (result security.IoTSecurityAggregatedRecommendationListPage, err error)
+}
+
+var _ IoTSecuritySolutionsAnalyticsRecommendationsClientAPI = (*security.IoTSecuritySolutionsAnalyticsRecommendationsClient)(nil)
+
// AllowedConnectionsClientAPI contains the set of methods on the AllowedConnectionsClient type.
type AllowedConnectionsClientAPI interface {
Get(ctx context.Context, resourceGroupName string, connectionType security.ConnectionType) (result security.AllowedConnectionsResource, err error)
@@ -206,67 +267,6 @@ type WorkspaceSettingsClientAPI interface {
var _ WorkspaceSettingsClientAPI = (*security.WorkspaceSettingsClient)(nil)
-// IoTSecuritySolutionsClientAPI contains the set of methods on the IoTSecuritySolutionsClient type.
-type IoTSecuritySolutionsClientAPI interface {
- List(ctx context.Context, filter string) (result security.IoTSecuritySolutionsListPage, err error)
-}
-
-var _ IoTSecuritySolutionsClientAPI = (*security.IoTSecuritySolutionsClient)(nil)
-
-// IoTSecuritySolutionsResourceGroupClientAPI contains the set of methods on the IoTSecuritySolutionsResourceGroupClient type.
-type IoTSecuritySolutionsResourceGroupClientAPI interface {
- List(ctx context.Context, resourceGroupName string, filter string) (result security.IoTSecuritySolutionsListPage, err error)
-}
-
-var _ IoTSecuritySolutionsResourceGroupClientAPI = (*security.IoTSecuritySolutionsResourceGroupClient)(nil)
-
-// IotSecuritySolutionClientAPI contains the set of methods on the IotSecuritySolutionClient type.
-type IotSecuritySolutionClientAPI interface {
- Create(ctx context.Context, resourceGroupName string, solutionName string, iotSecuritySolutionData security.IoTSecuritySolutionModel) (result security.IoTSecuritySolutionModel, err error)
- Delete(ctx context.Context, resourceGroupName string, solutionName string) (result autorest.Response, err error)
- Get(ctx context.Context, resourceGroupName string, solutionName string) (result security.IoTSecuritySolutionModel, err error)
- Update(ctx context.Context, resourceGroupName string, solutionName string, updateIotSecuritySolutionData security.UpdateIotSecuritySolutionData) (result security.IoTSecuritySolutionModel, err error)
-}
-
-var _ IotSecuritySolutionClientAPI = (*security.IotSecuritySolutionClient)(nil)
-
-// IoTSecuritySolutionsAnalyticsClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsClient type.
-type IoTSecuritySolutionsAnalyticsClientAPI interface {
- GetAll(ctx context.Context, resourceGroupName string, solutionName string) (result security.IoTSecuritySolutionAnalyticsModelList, err error)
- GetDefault(ctx context.Context, resourceGroupName string, solutionName string) (result security.IoTSecuritySolutionAnalyticsModel, err error)
-}
-
-var _ IoTSecuritySolutionsAnalyticsClientAPI = (*security.IoTSecuritySolutionsAnalyticsClient)(nil)
-
-// IoTSecuritySolutionsAnalyticsAggregatedAlertsClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsAggregatedAlertsClient type.
-type IoTSecuritySolutionsAnalyticsAggregatedAlertsClientAPI interface {
- List(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (result security.IoTSecurityAggregatedAlertListPage, err error)
-}
-
-var _ IoTSecuritySolutionsAnalyticsAggregatedAlertsClientAPI = (*security.IoTSecuritySolutionsAnalyticsAggregatedAlertsClient)(nil)
-
-// IoTSecuritySolutionsAnalyticsAggregatedAlertClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsAggregatedAlertClient type.
-type IoTSecuritySolutionsAnalyticsAggregatedAlertClientAPI interface {
- Dismiss(ctx context.Context, resourceGroupName string, solutionName string, aggregatedAlertName string) (result autorest.Response, err error)
- Get(ctx context.Context, resourceGroupName string, solutionName string, aggregatedAlertName string) (result security.IoTSecurityAggregatedAlert, err error)
-}
-
-var _ IoTSecuritySolutionsAnalyticsAggregatedAlertClientAPI = (*security.IoTSecuritySolutionsAnalyticsAggregatedAlertClient)(nil)
-
-// IoTSecuritySolutionsAnalyticsRecommendationClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsRecommendationClient type.
-type IoTSecuritySolutionsAnalyticsRecommendationClientAPI interface {
- Get(ctx context.Context, resourceGroupName string, solutionName string, aggregatedRecommendationName string) (result security.IoTSecurityAggregatedRecommendation, err error)
-}
-
-var _ IoTSecuritySolutionsAnalyticsRecommendationClientAPI = (*security.IoTSecuritySolutionsAnalyticsRecommendationClient)(nil)
-
-// IoTSecuritySolutionsAnalyticsRecommendationsClientAPI contains the set of methods on the IoTSecuritySolutionsAnalyticsRecommendationsClient type.
-type IoTSecuritySolutionsAnalyticsRecommendationsClientAPI interface {
- List(ctx context.Context, resourceGroupName string, solutionName string, top *int32) (result security.IoTSecurityAggregatedRecommendationListPage, err error)
-}
-
-var _ IoTSecuritySolutionsAnalyticsRecommendationsClientAPI = (*security.IoTSecuritySolutionsAnalyticsRecommendationsClient)(nil)
-
// RegulatoryComplianceStandardsClientAPI contains the set of methods on the RegulatoryComplianceStandardsClient type.
type RegulatoryComplianceStandardsClientAPI interface {
Get(ctx context.Context, regulatoryComplianceStandardName string) (result security.RegulatoryComplianceStandard, err error)
diff --git a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/alertruletemplates.go b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/alertruletemplates.go
new file mode 100644
index 000000000000..c9d00301a65b
--- /dev/null
+++ b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/alertruletemplates.go
@@ -0,0 +1,270 @@
+package securityinsight
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// AlertRuleTemplatesClient is the API spec for Microsoft.SecurityInsights (Azure Security Insights) resource provider
+type AlertRuleTemplatesClient struct {
+ BaseClient
+}
+
+// NewAlertRuleTemplatesClient creates an instance of the AlertRuleTemplatesClient client.
+func NewAlertRuleTemplatesClient(subscriptionID string) AlertRuleTemplatesClient {
+ return NewAlertRuleTemplatesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewAlertRuleTemplatesClientWithBaseURI creates an instance of the AlertRuleTemplatesClient client.
+func NewAlertRuleTemplatesClientWithBaseURI(baseURI string, subscriptionID string) AlertRuleTemplatesClient {
+ return AlertRuleTemplatesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Get gets the alert rule template.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// operationalInsightsResourceProvider - the namespace of workspaces resource provider-
+// Microsoft.OperationalInsights.
+// workspaceName - the name of the workspace.
+// alertRuleTemplateID - alert rule template ID
+func (client AlertRuleTemplatesClient) Get(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, alertRuleTemplateID string) (result AlertRuleTemplateModel, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRuleTemplatesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: workspaceName,
+ Constraints: []validation.Constraint{{Target: "workspaceName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "workspaceName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("securityinsight.AlertRuleTemplatesClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName, alertRuleTemplateID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.AlertRuleTemplatesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "securityinsight.AlertRuleTemplatesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.AlertRuleTemplatesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client AlertRuleTemplatesClient) GetPreparer(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, alertRuleTemplateID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "alertRuleTemplateId": autorest.Encode("path", alertRuleTemplateID),
+ "operationalInsightsResourceProvider": autorest.Encode("path", operationalInsightsResourceProvider),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-01-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{operationalInsightsResourceProvider}/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/alertRuleTemplates/{alertRuleTemplateId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client AlertRuleTemplatesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client AlertRuleTemplatesClient) GetResponder(resp *http.Response) (result AlertRuleTemplateModel, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets all alert rule templates.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// operationalInsightsResourceProvider - the namespace of workspaces resource provider-
+// Microsoft.OperationalInsights.
+// workspaceName - the name of the workspace.
+func (client AlertRuleTemplatesClient) List(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string) (result AlertRuleTemplatesListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRuleTemplatesClient.List")
+ defer func() {
+ sc := -1
+ if result.artl.Response.Response != nil {
+ sc = result.artl.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: workspaceName,
+ Constraints: []validation.Constraint{{Target: "workspaceName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "workspaceName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("securityinsight.AlertRuleTemplatesClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.AlertRuleTemplatesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.artl.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "securityinsight.AlertRuleTemplatesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.artl, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.AlertRuleTemplatesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client AlertRuleTemplatesClient) ListPreparer(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "operationalInsightsResourceProvider": autorest.Encode("path", operationalInsightsResourceProvider),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-01-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{operationalInsightsResourceProvider}/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/alertRuleTemplates", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client AlertRuleTemplatesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client AlertRuleTemplatesClient) ListResponder(resp *http.Response) (result AlertRuleTemplatesList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client AlertRuleTemplatesClient) listNextResults(ctx context.Context, lastResults AlertRuleTemplatesList) (result AlertRuleTemplatesList, err error) {
+ req, err := lastResults.alertRuleTemplatesListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "securityinsight.AlertRuleTemplatesClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "securityinsight.AlertRuleTemplatesClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.AlertRuleTemplatesClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AlertRuleTemplatesClient) ListComplete(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string) (result AlertRuleTemplatesListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRuleTemplatesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName)
+ return
+}
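
A short sketch of paging through alert rule templates with the new client. It assumes the generated `AlertRuleTemplatesListIterator` follows the standard `NotDone`/`NextWithContext`/`Value` pattern used elsewhere in the SDK, and that an Authorizer has been configured on the client:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
)

func main() {
	client := securityinsight.NewAlertRuleTemplatesClient("<subscription-id>")
	// client.Authorizer = ... (credential setup elided)

	ctx := context.Background()
	iter, err := client.ListComplete(ctx, "my-rg", "Microsoft.OperationalInsights", "my-workspace")
	if err != nil {
		log.Fatal(err)
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value()) // one alert rule template per iteration
		if err := iter.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```
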
diff --git a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/bookmarks.go b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/bookmarks.go
index 6b2677e3e20f..d324696ba926 100644
--- a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/bookmarks.go
+++ b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/bookmarks.go
@@ -74,6 +74,10 @@ func (client BookmarksClient) CreateOrUpdate(ctx context.Context, resourceGroupN
{TargetValue: bookmark,
Constraints: []validation.Constraint{{Target: "bookmark.BookmarkProperties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "bookmark.BookmarkProperties.DisplayName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "bookmark.BookmarkProperties.CreatedBy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "bookmark.BookmarkProperties.CreatedBy.ObjectID", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "bookmark.BookmarkProperties.UpdatedBy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "bookmark.BookmarkProperties.UpdatedBy.ObjectID", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "bookmark.BookmarkProperties.Query", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("securityinsight.BookmarksClient", "CreateOrUpdate", err.Error())
diff --git a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/casecomments.go b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/casecomments.go
new file mode 100644
index 000000000000..897d9d02fa33
--- /dev/null
+++ b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/casecomments.go
@@ -0,0 +1,149 @@
+package securityinsight
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// CaseCommentsClient is the API spec for Microsoft.SecurityInsights (Azure Security Insights) resource provider
+type CaseCommentsClient struct {
+ BaseClient
+}
+
+// NewCaseCommentsClient creates an instance of the CaseCommentsClient client.
+func NewCaseCommentsClient(subscriptionID string) CaseCommentsClient {
+ return NewCaseCommentsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewCaseCommentsClientWithBaseURI creates an instance of the CaseCommentsClient client.
+func NewCaseCommentsClientWithBaseURI(baseURI string, subscriptionID string) CaseCommentsClient {
+ return CaseCommentsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateComment creates the case comment.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// operationalInsightsResourceProvider - the namespace of workspaces resource provider-
+// Microsoft.OperationalInsights.
+// workspaceName - the name of the workspace.
+// caseID - case ID
+// caseCommentID - case comment ID
+// caseComment - the case comment
+func (client CaseCommentsClient) CreateComment(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string, caseCommentID string, caseComment CaseComment) (result CaseComment, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CaseCommentsClient.CreateComment")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: workspaceName,
+ Constraints: []validation.Constraint{{Target: "workspaceName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "workspaceName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: caseComment,
+ Constraints: []validation.Constraint{{Target: "caseComment.CaseCommentProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "caseComment.CaseCommentProperties.Message", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "caseComment.CaseCommentProperties.UserInfo", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "caseComment.CaseCommentProperties.UserInfo.ObjectID", Name: validation.Null, Rule: true, Chain: nil}}},
+ }}}}}); err != nil {
+ return result, validation.NewError("securityinsight.CaseCommentsClient", "CreateComment", err.Error())
+ }
+
+ req, err := client.CreateCommentPreparer(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName, caseID, caseCommentID, caseComment)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.CaseCommentsClient", "CreateComment", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateCommentSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "securityinsight.CaseCommentsClient", "CreateComment", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateCommentResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.CaseCommentsClient", "CreateComment", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateCommentPreparer prepares the CreateComment request.
+func (client CaseCommentsClient) CreateCommentPreparer(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string, caseCommentID string, caseComment CaseComment) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "caseCommentId": autorest.Encode("path", caseCommentID),
+ "caseId": autorest.Encode("path", caseID),
+ "operationalInsightsResourceProvider": autorest.Encode("path", operationalInsightsResourceProvider),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-01-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{operationalInsightsResourceProvider}/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/cases/{caseId}/comments/{caseCommentId}", pathParameters),
+ autorest.WithJSON(caseComment),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateCommentSender sends the CreateComment request. The method will close the
+// http.Response Body if it receives an error.
+func (client CaseCommentsClient) CreateCommentSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateCommentResponder handles the response to the CreateComment request. The method always
+// closes the http.Response Body.
+func (client CaseCommentsClient) CreateCommentResponder(resp *http.Response) (result CaseComment, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
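
For reference, a minimal usage sketch of the CreateComment operation added above. It assumes the package's usual generated constructor (NewCaseCommentsClient) and the CaseComment/CaseCommentProperties field names implied by the validation block; the subscription, resource names, and IDs are placeholders, and environment-based authentication is only one option.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Placeholder subscription ID; replace with a real one.
	client := securityinsight.NewCaseCommentsClient("00000000-0000-0000-0000-000000000000")

	// Authenticate from environment variables (service principal, MSI, etc.).
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	// Message is required by the validation above; UserInfo.ObjectID is only
	// required when UserInfo is supplied.
	comment := securityinsight.CaseComment{
		CaseCommentProperties: &securityinsight.CaseCommentProperties{
			Message: to.StringPtr("Reviewed the related alerts; closing as benign."),
		},
	}

	result, err := client.CreateComment(context.Background(),
		"my-resource-group",             // resourceGroupName (placeholder)
		"Microsoft.OperationalInsights", // operationalInsightsResourceProvider
		"my-workspace",                  // workspaceName (placeholder)
		"my-case-id",                    // caseID (placeholder)
		"my-comment-id",                 // caseCommentID (placeholder)
		comment)
	if err != nil {
		panic(err)
	}
	fmt.Println("created comment, HTTP status:", result.Response.Status)
}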
diff --git a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/cases.go b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/cases.go
index 9a4eb0d79790..5f4bc4d1c106 100644
--- a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/cases.go
+++ b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/cases.go
@@ -73,7 +73,11 @@ func (client CasesClient) CreateOrUpdate(ctx context.Context, resourceGroupName
{Target: "workspaceName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
{TargetValue: caseParameter,
Constraints: []validation.Constraint{{Target: "caseParameter.CaseProperties", Name: validation.Null, Rule: false,
- Chain: []validation.Constraint{{Target: "caseParameter.CaseProperties.Title", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ Chain: []validation.Constraint{{Target: "caseParameter.CaseProperties.StartTimeUtc", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "caseParameter.CaseProperties.Title", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "caseParameter.CaseProperties.Owner", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "caseParameter.CaseProperties.Owner.ObjectID", Name: validation.Null, Rule: true, Chain: nil}}},
+ }}}}}); err != nil {
return result, validation.NewError("securityinsight.CasesClient", "CreateOrUpdate", err.Error())
}
@@ -334,6 +338,104 @@ func (client CasesClient) GetResponder(resp *http.Response) (result Case, err er
return
}
+// GetComment gets a case comment.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// operationalInsightsResourceProvider - the namespace of workspaces resource provider -
+// Microsoft.OperationalInsights.
+// workspaceName - the name of the workspace.
+// caseID - case ID
+// caseCommentID - case comment ID
+func (client CasesClient) GetComment(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string, caseCommentID string) (result CaseComment, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CasesClient.GetComment")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: workspaceName,
+ Constraints: []validation.Constraint{{Target: "workspaceName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "workspaceName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("securityinsight.CasesClient", "GetComment", err.Error())
+ }
+
+ req, err := client.GetCommentPreparer(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName, caseID, caseCommentID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.CasesClient", "GetComment", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetCommentSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "securityinsight.CasesClient", "GetComment", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetCommentResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.CasesClient", "GetComment", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetCommentPreparer prepares the GetComment request.
+func (client CasesClient) GetCommentPreparer(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string, caseCommentID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "caseCommentId": autorest.Encode("path", caseCommentID),
+ "caseId": autorest.Encode("path", caseID),
+ "operationalInsightsResourceProvider": autorest.Encode("path", operationalInsightsResourceProvider),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-01-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{operationalInsightsResourceProvider}/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/cases/{caseId}/comments/{caseCommentId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetCommentSender sends the GetComment request. The method will close the
+// http.Response Body if it receives an error.
+func (client CasesClient) GetCommentSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetCommentResponder handles the response to the GetComment request. The method always
+// closes the http.Response Body.
+func (client CasesClient) GetCommentResponder(resp *http.Response) (result CaseComment, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// List gets all cases.
// Parameters:
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
@@ -341,7 +443,13 @@ func (client CasesClient) GetResponder(resp *http.Response) (result Case, err er
// operationalInsightsResourceProvider - the namespace of workspaces resource provider-
// Microsoft.OperationalInsights.
// workspaceName - the name of the workspace.
-func (client CasesClient) List(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string) (result CaseListPage, err error) {
+// filter - filters the results, based on a Boolean condition. Optional.
+// orderby - sorts the results. Optional.
+// top - returns only the first n results. Optional.
+// skipToken - skiptoken is only used if a previous operation returned a partial result. If a previous response
+// contains a nextLink element, the value of the nextLink element will include a skiptoken parameter that
+// specifies a starting point to use for subsequent calls. Optional.
+func (client CasesClient) List(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, filter string, orderby string, top *int32, skipToken string) (result CaseListPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/CasesClient.List")
defer func() {
@@ -366,7 +474,7 @@ func (client CasesClient) List(ctx context.Context, resourceGroupName string, op
}
result.fn = client.listNextResults
- req, err := client.ListPreparer(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName)
+ req, err := client.ListPreparer(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName, filter, orderby, top, skipToken)
if err != nil {
err = autorest.NewErrorWithError(err, "securityinsight.CasesClient", "List", nil, "Failure preparing request")
return
@@ -388,7 +496,7 @@ func (client CasesClient) List(ctx context.Context, resourceGroupName string, op
}
// ListPreparer prepares the List request.
-func (client CasesClient) ListPreparer(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string) (*http.Request, error) {
+func (client CasesClient) ListPreparer(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, filter string, orderby string, top *int32, skipToken string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"operationalInsightsResourceProvider": autorest.Encode("path", operationalInsightsResourceProvider),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
@@ -400,6 +508,18 @@ func (client CasesClient) ListPreparer(ctx context.Context, resourceGroupName st
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(orderby) > 0 {
+ queryParameters["$orderby"] = autorest.Encode("query", orderby)
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+ if len(skipToken) > 0 {
+ queryParameters["$skipToken"] = autorest.Encode("query", skipToken)
+ }
preparer := autorest.CreatePreparer(
autorest.AsGet(),
@@ -451,7 +571,7 @@ func (client CasesClient) listNextResults(ctx context.Context, lastResults CaseL
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client CasesClient) ListComplete(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string) (result CaseListIterator, err error) {
+func (client CasesClient) ListComplete(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, filter string, orderby string, top *int32, skipToken string) (result CaseListIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/CasesClient.List")
defer func() {
@@ -462,6 +582,6 @@ func (client CasesClient) ListComplete(ctx context.Context, resourceGroupName st
tracing.EndSpan(ctx, sc, err)
}()
}
- result.page, err = client.List(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName)
+ result.page, err = client.List(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName, filter, orderby, top, skipToken)
return
}
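
The List surface above now takes OData query options (filter, orderby, top, skipToken). A hedged sketch of driving it through ListComplete, which pages automatically, assuming a CasesClient already constructed and authorized as in the earlier sketch; the OData expressions and property paths are illustrative only.

package samples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
	"github.com/Azure/go-autorest/autorest/to"
)

// listHighSeverityCases walks every page of results for an illustrative filter.
func listHighSeverityCases(ctx context.Context, client securityinsight.CasesClient) error {
	iter, err := client.ListComplete(ctx,
		"my-resource-group",              // resourceGroupName (placeholder)
		"Microsoft.OperationalInsights",  // operationalInsightsResourceProvider
		"my-workspace",                   // workspaceName (placeholder)
		"properties/severity eq 'High'",  // filter (illustrative OData expression)
		"properties/createdTimeUtc desc", // orderby (illustrative)
		to.Int32Ptr(50),                  // top: at most 50 results per page
		"")                               // skipToken: empty on the first call
	if err != nil {
		return err
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}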
diff --git a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/comments.go b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/comments.go
new file mode 100644
index 000000000000..c9d4578f02c0
--- /dev/null
+++ b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/comments.go
@@ -0,0 +1,194 @@
+package securityinsight
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// CommentsClient is the API spec for Microsoft.SecurityInsights (Azure Security Insights) resource provider
+type CommentsClient struct {
+ BaseClient
+}
+
+// NewCommentsClient creates an instance of the CommentsClient client.
+func NewCommentsClient(subscriptionID string) CommentsClient {
+ return NewCommentsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewCommentsClientWithBaseURI creates an instance of the CommentsClient client.
+func NewCommentsClientWithBaseURI(baseURI string, subscriptionID string) CommentsClient {
+ return CommentsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// ListByCase gets all case comments.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// operationalInsightsResourceProvider - the namespace of workspaces resource provider -
+// Microsoft.OperationalInsights.
+// workspaceName - the name of the workspace.
+// caseID - case ID
+// filter - filters the results, based on a Boolean condition. Optional.
+// orderby - sorts the results. Optional.
+// top - returns only the first n results. Optional.
+// skipToken - skiptoken is only used if a previous operation returned a partial result. If a previous response
+// contains a nextLink element, the value of the nextLink element will include a skiptoken parameter that
+// specifies a starting point to use for subsequent calls. Optional.
+func (client CommentsClient) ListByCase(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string, filter string, orderby string, top *int32, skipToken string) (result CaseCommentListPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CommentsClient.ListByCase")
+ defer func() {
+ sc := -1
+ if result.ccl.Response.Response != nil {
+ sc = result.ccl.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: workspaceName,
+ Constraints: []validation.Constraint{{Target: "workspaceName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "workspaceName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("securityinsight.CommentsClient", "ListByCase", err.Error())
+ }
+
+ result.fn = client.listByCaseNextResults
+ req, err := client.ListByCasePreparer(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName, caseID, filter, orderby, top, skipToken)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.CommentsClient", "ListByCase", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByCaseSender(req)
+ if err != nil {
+ result.ccl.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "securityinsight.CommentsClient", "ListByCase", resp, "Failure sending request")
+ return
+ }
+
+ result.ccl, err = client.ListByCaseResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.CommentsClient", "ListByCase", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByCasePreparer prepares the ListByCase request.
+func (client CommentsClient) ListByCasePreparer(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string, filter string, orderby string, top *int32, skipToken string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "caseId": autorest.Encode("path", caseID),
+ "operationalInsightsResourceProvider": autorest.Encode("path", operationalInsightsResourceProvider),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-01-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+ if len(orderby) > 0 {
+ queryParameters["$orderby"] = autorest.Encode("query", orderby)
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+ if len(skipToken) > 0 {
+ queryParameters["$skipToken"] = autorest.Encode("query", skipToken)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{operationalInsightsResourceProvider}/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/cases/{caseId}/comments", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByCaseSender sends the ListByCase request. The method will close the
+// http.Response Body if it receives an error.
+func (client CommentsClient) ListByCaseSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByCaseResponder handles the response to the ListByCase request. The method always
+// closes the http.Response Body.
+func (client CommentsClient) ListByCaseResponder(resp *http.Response) (result CaseCommentList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByCaseNextResults retrieves the next set of results, if any.
+func (client CommentsClient) listByCaseNextResults(ctx context.Context, lastResults CaseCommentList) (result CaseCommentList, err error) {
+ req, err := lastResults.caseCommentListPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "securityinsight.CommentsClient", "listByCaseNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByCaseSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "securityinsight.CommentsClient", "listByCaseNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByCaseResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.CommentsClient", "listByCaseNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByCaseComplete enumerates all values, automatically crossing page boundaries as required.
+func (client CommentsClient) ListByCaseComplete(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string, filter string, orderby string, top *int32, skipToken string) (result CaseCommentListIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CommentsClient.ListByCase")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByCase(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName, caseID, filter, orderby, top, skipToken)
+ return
+}
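
A similar hedged sketch for the new CommentsClient.ListByCase operation, this time iterating page by page rather than through the Complete iterator; the client is assumed to be constructed and authorized as before, and all names are placeholders.

package samples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
)

// countCaseComments pages through all comments on a case and returns the total.
func countCaseComments(ctx context.Context, client securityinsight.CommentsClient, caseID string) (int, error) {
	page, err := client.ListByCase(ctx,
		"my-resource-group",             // resourceGroupName (placeholder)
		"Microsoft.OperationalInsights", // operationalInsightsResourceProvider
		"my-workspace",                  // workspaceName (placeholder)
		caseID,
		"",  // filter: none
		"",  // orderby: server default
		nil, // top: no page-size hint
		"")  // skipToken: empty on the first call
	if err != nil {
		return 0, err
	}
	total := 0
	for page.NotDone() {
		total += len(page.Values())
		if err := page.NextWithContext(ctx); err != nil {
			return total, err
		}
	}
	fmt.Println("comments on case:", total)
	return total, nil
}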
diff --git a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/entities.go b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/entities.go
index e72cffc3e2c3..f0e21ffe611a 100644
--- a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/entities.go
+++ b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/entities.go
@@ -41,6 +41,105 @@ func NewEntitiesClientWithBaseURI(baseURI string, subscriptionID string) Entitie
return EntitiesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
+// Expand expands an entity.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// operationalInsightsResourceProvider - the namespace of workspaces resource provider -
+// Microsoft.OperationalInsights.
+// workspaceName - the name of the workspace.
+// entityID - entity ID
+// parameters - the parameters required to execute an expand operation on the given entity.
+func (client EntitiesClient) Expand(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, entityID string, parameters EntityExpandParameters) (result EntityExpandResponse, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EntitiesClient.Expand")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.Pattern, Rule: `^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$`, Chain: nil}}},
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: workspaceName,
+ Constraints: []validation.Constraint{{Target: "workspaceName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "workspaceName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("securityinsight.EntitiesClient", "Expand", err.Error())
+ }
+
+ req, err := client.ExpandPreparer(ctx, resourceGroupName, operationalInsightsResourceProvider, workspaceName, entityID, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.EntitiesClient", "Expand", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ExpandSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "securityinsight.EntitiesClient", "Expand", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ExpandResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "securityinsight.EntitiesClient", "Expand", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ExpandPreparer prepares the Expand request.
+func (client EntitiesClient) ExpandPreparer(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, entityID string, parameters EntityExpandParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "entityId": autorest.Encode("path", entityID),
+ "operationalInsightsResourceProvider": autorest.Encode("path", operationalInsightsResourceProvider),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "workspaceName": autorest.Encode("path", workspaceName),
+ }
+
+ const APIVersion = "2019-01-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{operationalInsightsResourceProvider}/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/entities/{entityId}/expand", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ExpandSender sends the Expand request. The method will close the
+// http.Response Body if it receives an error.
+func (client EntitiesClient) ExpandSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ExpandResponder handles the response to the Expand request. The method always
+// closes the http.Response Body.
+func (client EntitiesClient) ExpandResponder(resp *http.Response) (result EntityExpandResponse, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// Get gets an entity.
// Parameters:
// resourceGroupName - the name of the resource group within the user's subscription. The name is case
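
The Expand operation added above takes an EntityExpandParameters payload whose concrete fields are defined in models.go and not shown here, so this hedged sketch leaves it zero-valued; the client is assumed to be constructed and authorized as in the first sketch, and all names are placeholders.

package samples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
)

// expandEntity runs an expand operation against a single entity.
func expandEntity(ctx context.Context, client securityinsight.EntitiesClient, entityID string) error {
	// EntityExpandParameters is left zero-valued here; in real code, populate the
	// expansion fields defined in models.go for this API version.
	result, err := client.Expand(ctx,
		"my-resource-group",             // resourceGroupName (placeholder)
		"Microsoft.OperationalInsights", // operationalInsightsResourceProvider
		"my-workspace",                  // workspaceName (placeholder)
		entityID,
		securityinsight.EntityExpandParameters{})
	if err != nil {
		return err
	}
	fmt.Println("expand completed, HTTP status:", result.Response.Status)
	return nil
}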
diff --git a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/models.go b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/models.go
index 3439ef0034bb..a36a38b88823 100644
--- a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/models.go
+++ b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/models.go
@@ -48,13 +48,17 @@ func PossibleAggregationsKindValues() []AggregationsKind {
type AlertRuleKind string
const (
+ // Filter ...
+ Filter AlertRuleKind = "Filter"
+ // Fusion ...
+ Fusion AlertRuleKind = "Fusion"
// Scheduled ...
Scheduled AlertRuleKind = "Scheduled"
)
// PossibleAlertRuleKindValues returns an array of possible values for the AlertRuleKind const type.
func PossibleAlertRuleKindValues() []AlertRuleKind {
- return []AlertRuleKind{Scheduled}
+ return []AlertRuleKind{Filter, Fusion, Scheduled}
}
// AlertSeverity enumerates the values for alert severity.
@@ -76,6 +80,60 @@ func PossibleAlertSeverityValues() []AlertSeverity {
return []AlertSeverity{High, Informational, Low, Medium}
}
+// AlertStatus enumerates the values for alert status.
+type AlertStatus string
+
+const (
+ // AlertStatusDismissed Alert dismissed as false positive
+ AlertStatusDismissed AlertStatus = "Dismissed"
+ // AlertStatusInProgress Alert is being handled
+ AlertStatusInProgress AlertStatus = "InProgress"
+ // AlertStatusNew New alert
+ AlertStatusNew AlertStatus = "New"
+ // AlertStatusResolved Alert closed after handling
+ AlertStatusResolved AlertStatus = "Resolved"
+ // AlertStatusUnknown Unknown value
+ AlertStatusUnknown AlertStatus = "Unknown"
+)
+
+// PossibleAlertStatusValues returns an array of possible values for the AlertStatus const type.
+func PossibleAlertStatusValues() []AlertStatus {
+ return []AlertStatus{AlertStatusDismissed, AlertStatusInProgress, AlertStatusNew, AlertStatusResolved, AlertStatusUnknown}
+}
+
+// AttackTactic enumerates the values for attack tactic.
+type AttackTactic string
+
+const (
+ // Collection ...
+ Collection AttackTactic = "Collection"
+ // CommandAndControl ...
+ CommandAndControl AttackTactic = "CommandAndControl"
+ // CredentialAccess ...
+ CredentialAccess AttackTactic = "CredentialAccess"
+ // DefenseEvasion ...
+ DefenseEvasion AttackTactic = "DefenseEvasion"
+ // Discovery ...
+ Discovery AttackTactic = "Discovery"
+ // Execution ...
+ Execution AttackTactic = "Execution"
+ // Exfiltration ...
+ Exfiltration AttackTactic = "Exfiltration"
+ // InitialAccess ...
+ InitialAccess AttackTactic = "InitialAccess"
+ // LateralMovement ...
+ LateralMovement AttackTactic = "LateralMovement"
+ // Persistence ...
+ Persistence AttackTactic = "Persistence"
+ // PrivilegeEscalation ...
+ PrivilegeEscalation AttackTactic = "PrivilegeEscalation"
+)
+
+// PossibleAttackTacticValues returns an array of possible values for the AttackTactic const type.
+func PossibleAttackTacticValues() []AttackTactic {
+ return []AttackTactic{Collection, CommandAndControl, CredentialAccess, DefenseEvasion, Discovery, Execution, Exfiltration, InitialAccess, LateralMovement, Persistence, PrivilegeEscalation}
+}
+
// CaseSeverity enumerates the values for case severity.
type CaseSeverity string
@@ -122,42 +180,84 @@ type CloseReason string
const (
// Dismissed Case was dismissed
Dismissed CloseReason = "Dismissed"
+ // FalsePositive Case was false positive
+ FalsePositive CloseReason = "FalsePositive"
// Other Case was closed for another reason
Other CloseReason = "Other"
// Resolved Case was resolved
Resolved CloseReason = "Resolved"
+ // TruePositive Case was true positive
+ TruePositive CloseReason = "TruePositive"
)
// PossibleCloseReasonValues returns an array of possible values for the CloseReason const type.
func PossibleCloseReasonValues() []CloseReason {
- return []CloseReason{Dismissed, Other, Resolved}
+ return []CloseReason{Dismissed, FalsePositive, Other, Resolved, TruePositive}
+}
+
+// ConfidenceLevel enumerates the values for confidence level.
+type ConfidenceLevel string
+
+const (
+	// ConfidenceLevelHigh High confidence that the alert is a true positive (malicious)
+ ConfidenceLevelHigh ConfidenceLevel = "High"
+ // ConfidenceLevelLow Low confidence, meaning we have some doubts this is indeed malicious or part of an
+ // attack
+ ConfidenceLevelLow ConfidenceLevel = "Low"
+	// ConfidenceLevelUnknown Unknown confidence, this is the default value
+ ConfidenceLevelUnknown ConfidenceLevel = "Unknown"
+)
+
+// PossibleConfidenceLevelValues returns an array of possible values for the ConfidenceLevel const type.
+func PossibleConfidenceLevelValues() []ConfidenceLevel {
+ return []ConfidenceLevel{ConfidenceLevelHigh, ConfidenceLevelLow, ConfidenceLevelUnknown}
+}
+
+// ConfidenceScoreStatus enumerates the values for confidence score status.
+type ConfidenceScoreStatus string
+
+const (
+	// Final Final score was calculated and is available
+ Final ConfidenceScoreStatus = "Final"
+ // InProcess No score was set yet and calculation is in progress
+ InProcess ConfidenceScoreStatus = "InProcess"
+	// NotApplicable Score will not be calculated for this alert as it is not supported by the virtual analyst
+ NotApplicable ConfidenceScoreStatus = "NotApplicable"
+ // NotFinal Score is calculated and shown as part of the alert, but may be updated again at a later time
+ // following the processing of additional data
+ NotFinal ConfidenceScoreStatus = "NotFinal"
+)
+
+// PossibleConfidenceScoreStatusValues returns an array of possible values for the ConfidenceScoreStatus const type.
+func PossibleConfidenceScoreStatusValues() []ConfidenceScoreStatus {
+ return []ConfidenceScoreStatus{Final, InProcess, NotApplicable, NotFinal}
}
// DataConnectorKind enumerates the values for data connector kind.
type DataConnectorKind string
const (
- // AmazonWebServicesCloudTrail ...
- AmazonWebServicesCloudTrail DataConnectorKind = "AmazonWebServicesCloudTrail"
- // AzureActiveDirectory ...
- AzureActiveDirectory DataConnectorKind = "AzureActiveDirectory"
- // AzureAdvancedThreatProtection ...
- AzureAdvancedThreatProtection DataConnectorKind = "AzureAdvancedThreatProtection"
- // AzureSecurityCenter ...
- AzureSecurityCenter DataConnectorKind = "AzureSecurityCenter"
- // MicrosoftCloudAppSecurity ...
- MicrosoftCloudAppSecurity DataConnectorKind = "MicrosoftCloudAppSecurity"
- // MicrosoftDefenderAdvancedThreatProtection ...
- MicrosoftDefenderAdvancedThreatProtection DataConnectorKind = "MicrosoftDefenderAdvancedThreatProtection"
- // Office365 ...
- Office365 DataConnectorKind = "Office365"
- // ThreatIntelligence ...
- ThreatIntelligence DataConnectorKind = "ThreatIntelligence"
+ // DataConnectorKindAmazonWebServicesCloudTrail ...
+ DataConnectorKindAmazonWebServicesCloudTrail DataConnectorKind = "AmazonWebServicesCloudTrail"
+ // DataConnectorKindAzureActiveDirectory ...
+ DataConnectorKindAzureActiveDirectory DataConnectorKind = "AzureActiveDirectory"
+ // DataConnectorKindAzureAdvancedThreatProtection ...
+ DataConnectorKindAzureAdvancedThreatProtection DataConnectorKind = "AzureAdvancedThreatProtection"
+ // DataConnectorKindAzureSecurityCenter ...
+ DataConnectorKindAzureSecurityCenter DataConnectorKind = "AzureSecurityCenter"
+ // DataConnectorKindMicrosoftCloudAppSecurity ...
+ DataConnectorKindMicrosoftCloudAppSecurity DataConnectorKind = "MicrosoftCloudAppSecurity"
+ // DataConnectorKindMicrosoftDefenderAdvancedThreatProtection ...
+ DataConnectorKindMicrosoftDefenderAdvancedThreatProtection DataConnectorKind = "MicrosoftDefenderAdvancedThreatProtection"
+ // DataConnectorKindOffice365 ...
+ DataConnectorKindOffice365 DataConnectorKind = "Office365"
+ // DataConnectorKindThreatIntelligence ...
+ DataConnectorKindThreatIntelligence DataConnectorKind = "ThreatIntelligence"
)
// PossibleDataConnectorKindValues returns an array of possible values for the DataConnectorKind const type.
func PossibleDataConnectorKindValues() []DataConnectorKind {
- return []DataConnectorKind{AmazonWebServicesCloudTrail, AzureActiveDirectory, AzureAdvancedThreatProtection, AzureSecurityCenter, MicrosoftCloudAppSecurity, MicrosoftDefenderAdvancedThreatProtection, Office365, ThreatIntelligence}
+ return []DataConnectorKind{DataConnectorKindAmazonWebServicesCloudTrail, DataConnectorKindAzureActiveDirectory, DataConnectorKindAzureAdvancedThreatProtection, DataConnectorKindAzureSecurityCenter, DataConnectorKindMicrosoftCloudAppSecurity, DataConnectorKindMicrosoftDefenderAdvancedThreatProtection, DataConnectorKindOffice365, DataConnectorKindThreatIntelligence}
}
// DataTypeState enumerates the values for data type state.
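
The DataConnectorKind constants above are renamed to carry the type name as a prefix. A minimal migration sketch for callers that referenced the old names:

package samples

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
)

func printConnectorKinds() {
	// Previously: securityinsight.AzureActiveDirectory
	kind := securityinsight.DataConnectorKindAzureActiveDirectory

	// PossibleDataConnectorKindValues lists every renamed constant.
	fmt.Println(kind, securityinsight.PossibleDataConnectorKindValues())
}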
@@ -175,21 +275,221 @@ func PossibleDataTypeStateValues() []DataTypeState {
return []DataTypeState{Disabled, Enabled}
}
+// DataTypeStatus enumerates the values for data type status.
+type DataTypeStatus string
+
+const (
+ // Exist ...
+ Exist DataTypeStatus = "Exist"
+ // NotExist ...
+ NotExist DataTypeStatus = "NotExist"
+)
+
+// PossibleDataTypeStatusValues returns an array of possible values for the DataTypeStatus const type.
+func PossibleDataTypeStatusValues() []DataTypeStatus {
+ return []DataTypeStatus{Exist, NotExist}
+}
+
+// ElevationToken enumerates the values for elevation token.
+type ElevationToken string
+
+const (
+ // Default Default elevation token
+ Default ElevationToken = "Default"
+ // Full Full elevation token
+ Full ElevationToken = "Full"
+ // Limited Limited elevation token
+ Limited ElevationToken = "Limited"
+)
+
+// PossibleElevationTokenValues returns an array of possible values for the ElevationToken const type.
+func PossibleElevationTokenValues() []ElevationToken {
+ return []ElevationToken{Default, Full, Limited}
+}
+
// EntityKind enumerates the values for entity kind.
type EntityKind string
const (
- // Account Entity represents account in the system.
- Account EntityKind = "Account"
- // File Entity represents file in the system.
- File EntityKind = "File"
- // Host Entity represents host in the system.
- Host EntityKind = "Host"
+ // EntityKindAccount Entity represents account in the system.
+ EntityKindAccount EntityKind = "Account"
+ // EntityKindAzureResource Entity represents azure resource in the system.
+ EntityKindAzureResource EntityKind = "AzureResource"
+ // EntityKindBookmark Entity represents bookmark in the system.
+ EntityKindBookmark EntityKind = "Bookmark"
+ // EntityKindCloudApplication Entity represents cloud application in the system.
+ EntityKindCloudApplication EntityKind = "CloudApplication"
+ // EntityKindDNSResolution Entity represents dns resolution in the system.
+ EntityKindDNSResolution EntityKind = "DnsResolution"
+ // EntityKindFile Entity represents file in the system.
+ EntityKindFile EntityKind = "File"
+ // EntityKindFileHash Entity represents file hash in the system.
+ EntityKindFileHash EntityKind = "FileHash"
+ // EntityKindHost Entity represents host in the system.
+ EntityKindHost EntityKind = "Host"
+ // EntityKindIP Entity represents ip in the system.
+ EntityKindIP EntityKind = "Ip"
+ // EntityKindMalware Entity represents malware in the system.
+ EntityKindMalware EntityKind = "Malware"
+ // EntityKindProcess Entity represents process in the system.
+ EntityKindProcess EntityKind = "Process"
+ // EntityKindRegistryKey Entity represents registry key in the system.
+ EntityKindRegistryKey EntityKind = "RegistryKey"
+ // EntityKindRegistryValue Entity represents registry value in the system.
+ EntityKindRegistryValue EntityKind = "RegistryValue"
+ // EntityKindSecurityAlert Entity represents security alert in the system.
+ EntityKindSecurityAlert EntityKind = "SecurityAlert"
+ // EntityKindSecurityGroup Entity represents security group in the system.
+ EntityKindSecurityGroup EntityKind = "SecurityGroup"
+ // EntityKindURL Entity represents url in the system.
+ EntityKindURL EntityKind = "Url"
)
// PossibleEntityKindValues returns an array of possible values for the EntityKind const type.
func PossibleEntityKindValues() []EntityKind {
- return []EntityKind{Account, File, Host}
+ return []EntityKind{EntityKindAccount, EntityKindAzureResource, EntityKindBookmark, EntityKindCloudApplication, EntityKindDNSResolution, EntityKindFile, EntityKindFileHash, EntityKindHost, EntityKindIP, EntityKindMalware, EntityKindProcess, EntityKindRegistryKey, EntityKindRegistryValue, EntityKindSecurityAlert, EntityKindSecurityGroup, EntityKindURL}
+}
+
+// EntityType enumerates the values for entity type.
+type EntityType string
+
+const (
+ // EntityTypeAccount Entity represents account in the system.
+ EntityTypeAccount EntityType = "Account"
+ // EntityTypeAzureResource Entity represents azure resource in the system.
+ EntityTypeAzureResource EntityType = "AzureResource"
+ // EntityTypeCloudApplication Entity represents cloud application in the system.
+ EntityTypeCloudApplication EntityType = "CloudApplication"
+ // EntityTypeDNS Entity represents dns in the system.
+ EntityTypeDNS EntityType = "DNS"
+ // EntityTypeFile Entity represents file in the system.
+ EntityTypeFile EntityType = "File"
+ // EntityTypeFileHash Entity represents file hash in the system.
+ EntityTypeFileHash EntityType = "FileHash"
+ // EntityTypeHost Entity represents host in the system.
+ EntityTypeHost EntityType = "Host"
+ // EntityTypeHuntingBookmark Entity represents HuntingBookmark in the system.
+ EntityTypeHuntingBookmark EntityType = "HuntingBookmark"
+ // EntityTypeIP Entity represents ip in the system.
+ EntityTypeIP EntityType = "IP"
+ // EntityTypeMalware Entity represents malware in the system.
+ EntityTypeMalware EntityType = "Malware"
+ // EntityTypeProcess Entity represents process in the system.
+ EntityTypeProcess EntityType = "Process"
+ // EntityTypeRegistryKey Entity represents registry key in the system.
+ EntityTypeRegistryKey EntityType = "RegistryKey"
+ // EntityTypeRegistryValue Entity represents registry value in the system.
+ EntityTypeRegistryValue EntityType = "RegistryValue"
+ // EntityTypeSecurityAlert Entity represents security alert in the system.
+ EntityTypeSecurityAlert EntityType = "SecurityAlert"
+ // EntityTypeSecurityGroup Entity represents security group in the system.
+ EntityTypeSecurityGroup EntityType = "SecurityGroup"
+ // EntityTypeURL Entity represents url in the system.
+ EntityTypeURL EntityType = "URL"
+)
+
+// PossibleEntityTypeValues returns an array of possible values for the EntityType const type.
+func PossibleEntityTypeValues() []EntityType {
+ return []EntityType{EntityTypeAccount, EntityTypeAzureResource, EntityTypeCloudApplication, EntityTypeDNS, EntityTypeFile, EntityTypeFileHash, EntityTypeHost, EntityTypeHuntingBookmark, EntityTypeIP, EntityTypeMalware, EntityTypeProcess, EntityTypeRegistryKey, EntityTypeRegistryValue, EntityTypeSecurityAlert, EntityTypeSecurityGroup, EntityTypeURL}
+}
+
+// FileHashAlgorithm enumerates the values for file hash algorithm.
+type FileHashAlgorithm string
+
+const (
+ // MD5 MD5 hash type
+ MD5 FileHashAlgorithm = "MD5"
+ // SHA1 SHA1 hash type
+ SHA1 FileHashAlgorithm = "SHA1"
+ // SHA256 SHA256 hash type
+ SHA256 FileHashAlgorithm = "SHA256"
+ // SHA256AC SHA256 Authenticode hash type
+ SHA256AC FileHashAlgorithm = "SHA256AC"
+ // Unknown Unknown hash algorithm
+ Unknown FileHashAlgorithm = "Unknown"
+)
+
+// PossibleFileHashAlgorithmValues returns an array of possible values for the FileHashAlgorithm const type.
+func PossibleFileHashAlgorithmValues() []FileHashAlgorithm {
+ return []FileHashAlgorithm{MD5, SHA1, SHA256, SHA256AC, Unknown}
+}
+
+// KillChainIntent enumerates the values for kill chain intent.
+type KillChainIntent string
+
+const (
+ // KillChainIntentCollection Collection consists of techniques used to identify and gather information,
+ // such as sensitive files, from a target network prior to exfiltration. This category also covers
+ // locations on a system or network where the adversary may look for information to exfiltrate.
+ KillChainIntentCollection KillChainIntent = "Collection"
+ // KillChainIntentCommandAndControl The command and control tactic represents how adversaries communicate
+ // with systems under their control within a target network.
+ KillChainIntentCommandAndControl KillChainIntent = "CommandAndControl"
+ // KillChainIntentCredentialAccess Credential access represents techniques resulting in access to or
+ // control over system, domain, or service credentials that are used within an enterprise environment.
+ // Adversaries will likely attempt to obtain legitimate credentials from users or administrator accounts
+ // (local system administrator or domain users with administrator access) to use within the network. With
+ // sufficient access within a network, an adversary can create accounts for later use within the
+ // environment.
+ KillChainIntentCredentialAccess KillChainIntent = "CredentialAccess"
+ // KillChainIntentDefenseEvasion Defense evasion consists of techniques an adversary may use to evade
+ // detection or avoid other defenses. Sometimes these actions are the same as or variations of techniques
+ // in other categories that have the added benefit of subverting a particular defense or mitigation.
+ KillChainIntentDefenseEvasion KillChainIntent = "DefenseEvasion"
+ // KillChainIntentDiscovery Discovery consists of techniques that allow the adversary to gain knowledge
+ // about the system and internal network. When adversaries gain access to a new system, they must orient
+ // themselves to what they now have control of and what benefits operating from that system give to their
+ // current objective or overall goals during the intrusion. The operating system provides many native tools
+ // that aid in this post-compromise information-gathering phase.
+ KillChainIntentDiscovery KillChainIntent = "Discovery"
+ // KillChainIntentExecution The execution tactic represents techniques that result in execution of
+ // adversary-controlled code on a local or remote system. This tactic is often used in conjunction with
+ // lateral movement to expand access to remote systems on a network.
+ KillChainIntentExecution KillChainIntent = "Execution"
+ // KillChainIntentExfiltration Exfiltration refers to techniques and attributes that result or aid in the
+ // adversary removing files and information from a target network. This category also covers locations on a
+ // system or network where the adversary may look for information to exfiltrate.
+ KillChainIntentExfiltration KillChainIntent = "Exfiltration"
+	// KillChainIntentExploitation Exploitation is the stage where an attacker manages to get a foothold on the
+	// attacked resource. This stage is applicable not only to compute hosts, but also to resources such as
+	// user accounts, certificates, etc. Adversaries will often be able to control the resource after this
+ // stage.
+ KillChainIntentExploitation KillChainIntent = "Exploitation"
+	// KillChainIntentImpact The impact intent's primary objective is to directly reduce the availability or
+	// integrity of a system, service, or network, including manipulation of data to impact a business or
+	// operational process. This often refers to techniques such as ransomware, defacement, data
+	// manipulation, and others.
+ KillChainIntentImpact KillChainIntent = "Impact"
+ // KillChainIntentLateralMovement Lateral movement consists of techniques that enable an adversary to
+ // access and control remote systems on a network and could, but does not necessarily, include execution of
+ // tools on remote systems. The lateral movement techniques could allow an adversary to gather information
+ // from a system without needing additional tools, such as a remote access tool. An adversary can use
+	// lateral movement for many purposes, including remote execution of tools, pivoting to additional systems,
+ // access to specific information or files, access to additional credentials, or to cause an effect.
+ KillChainIntentLateralMovement KillChainIntent = "LateralMovement"
+ // KillChainIntentPersistence Persistence is any access, action, or configuration change to a system that
+ // gives an adversary a persistent presence on that system. Adversaries will often need to maintain access
+ // to systems through interruptions such as system restarts, loss of credentials, or other failures that
+	// would require a remote access tool to restart or an alternate backdoor for them to regain access.
+ KillChainIntentPersistence KillChainIntent = "Persistence"
+ // KillChainIntentPrivilegeEscalation Privilege escalation is the result of actions that allow an adversary
+ // to obtain a higher level of permissions on a system or network. Certain tools or actions require a
+ // higher level of privilege to work and are likely necessary at many points throughout an operation. User
+ // accounts with permissions to access specific systems or perform specific functions necessary for
+ // adversaries to achieve their objective may also be considered an escalation of privilege.
+ KillChainIntentPrivilegeEscalation KillChainIntent = "PrivilegeEscalation"
+	// KillChainIntentProbing Probing could be an attempt to access a certain resource regardless of
+	// malicious intent, or a failed attempt to gain access to a target system to gather information prior to
+	// exploitation. This step is usually detected as an attempt, originating from outside the network, to scan
+	// the target system and find a way in.
+ KillChainIntentProbing KillChainIntent = "Probing"
+ // KillChainIntentUnknown The default value.
+ KillChainIntentUnknown KillChainIntent = "Unknown"
+)
+
+// PossibleKillChainIntentValues returns an array of possible values for the KillChainIntent const type.
+func PossibleKillChainIntentValues() []KillChainIntent {
+ return []KillChainIntent{KillChainIntentCollection, KillChainIntentCommandAndControl, KillChainIntentCredentialAccess, KillChainIntentDefenseEvasion, KillChainIntentDiscovery, KillChainIntentExecution, KillChainIntentExfiltration, KillChainIntentExploitation, KillChainIntentImpact, KillChainIntentLateralMovement, KillChainIntentPersistence, KillChainIntentPrivilegeEscalation, KillChainIntentProbing, KillChainIntentUnknown}
}
// Kind enumerates the values for kind.
@@ -222,6 +522,25 @@ func PossibleKindBasicAggregationsValues() []KindBasicAggregations {
return []KindBasicAggregations{KindAggregations, KindCasesAggregation}
}
+// KindBasicAlertRuleTemplate enumerates the values for kind basic alert rule template.
+type KindBasicAlertRuleTemplate string
+
+const (
+ // KindBasicAlertRuleTemplateKindAlertRuleTemplate ...
+ KindBasicAlertRuleTemplateKindAlertRuleTemplate KindBasicAlertRuleTemplate = "AlertRuleTemplate"
+ // KindBasicAlertRuleTemplateKindFilter ...
+ KindBasicAlertRuleTemplateKindFilter KindBasicAlertRuleTemplate = "Filter"
+ // KindBasicAlertRuleTemplateKindFusion ...
+ KindBasicAlertRuleTemplateKindFusion KindBasicAlertRuleTemplate = "Fusion"
+ // KindBasicAlertRuleTemplateKindScheduled ...
+ KindBasicAlertRuleTemplateKindScheduled KindBasicAlertRuleTemplate = "Scheduled"
+)
+
+// PossibleKindBasicAlertRuleTemplateValues returns an array of possible values for the KindBasicAlertRuleTemplate const type.
+func PossibleKindBasicAlertRuleTemplateValues() []KindBasicAlertRuleTemplate {
+ return []KindBasicAlertRuleTemplate{KindBasicAlertRuleTemplateKindAlertRuleTemplate, KindBasicAlertRuleTemplateKindFilter, KindBasicAlertRuleTemplateKindFusion, KindBasicAlertRuleTemplateKindScheduled}
+}
+
// KindBasicDataConnector enumerates the values for kind basic data connector.
type KindBasicDataConnector string
@@ -257,17 +576,41 @@ type KindBasicEntity string
const (
// KindAccount ...
KindAccount KindBasicEntity = "Account"
+ // KindAzureResource ...
+ KindAzureResource KindBasicEntity = "AzureResource"
+ // KindCloudApplication ...
+ KindCloudApplication KindBasicEntity = "CloudApplication"
+ // KindDNSResolution ...
+ KindDNSResolution KindBasicEntity = "DnsResolution"
// KindEntity ...
KindEntity KindBasicEntity = "Entity"
// KindFile ...
KindFile KindBasicEntity = "File"
+ // KindFileHash ...
+ KindFileHash KindBasicEntity = "FileHash"
// KindHost ...
KindHost KindBasicEntity = "Host"
+ // KindIP ...
+ KindIP KindBasicEntity = "Ip"
+ // KindMalware ...
+ KindMalware KindBasicEntity = "Malware"
+ // KindProcess ...
+ KindProcess KindBasicEntity = "Process"
+ // KindRegistryKey ...
+ KindRegistryKey KindBasicEntity = "RegistryKey"
+ // KindRegistryValue ...
+ KindRegistryValue KindBasicEntity = "RegistryValue"
+ // KindSecurityAlert ...
+ KindSecurityAlert KindBasicEntity = "SecurityAlert"
+ // KindSecurityGroup ...
+ KindSecurityGroup KindBasicEntity = "SecurityGroup"
+ // KindURL ...
+ KindURL KindBasicEntity = "Url"
)
// PossibleKindBasicEntityValues returns an array of possible values for the KindBasicEntity const type.
func PossibleKindBasicEntityValues() []KindBasicEntity {
- return []KindBasicEntity{KindAccount, KindEntity, KindFile, KindHost}
+ return []KindBasicEntity{KindAccount, KindAzureResource, KindCloudApplication, KindDNSResolution, KindEntity, KindFile, KindFileHash, KindHost, KindIP, KindMalware, KindProcess, KindRegistryKey, KindRegistryValue, KindSecurityAlert, KindSecurityGroup, KindURL}
}
// KindBasicSettings enumerates the values for kind basic settings.
@@ -321,6 +664,64 @@ func PossibleOSFamilyValues() []OSFamily {
return []OSFamily{Android, IOS, Linux, Windows}
}
+// RegistryHive enumerates the values for registry hive.
+type RegistryHive string
+
+const (
+ // HKEYA HKEY_A
+ HKEYA RegistryHive = "HKEY_A"
+ // HKEYCLASSESROOT HKEY_CLASSES_ROOT
+ HKEYCLASSESROOT RegistryHive = "HKEY_CLASSES_ROOT"
+ // HKEYCURRENTCONFIG HKEY_CURRENT_CONFIG
+ HKEYCURRENTCONFIG RegistryHive = "HKEY_CURRENT_CONFIG"
+ // HKEYCURRENTUSER HKEY_CURRENT_USER
+ HKEYCURRENTUSER RegistryHive = "HKEY_CURRENT_USER"
+ // HKEYCURRENTUSERLOCALSETTINGS HKEY_CURRENT_USER_LOCAL_SETTINGS
+ HKEYCURRENTUSERLOCALSETTINGS RegistryHive = "HKEY_CURRENT_USER_LOCAL_SETTINGS"
+ // HKEYLOCALMACHINE HKEY_LOCAL_MACHINE
+ HKEYLOCALMACHINE RegistryHive = "HKEY_LOCAL_MACHINE"
+ // HKEYPERFORMANCEDATA HKEY_PERFORMANCE_DATA
+ HKEYPERFORMANCEDATA RegistryHive = "HKEY_PERFORMANCE_DATA"
+ // HKEYPERFORMANCENLSTEXT HKEY_PERFORMANCE_NLSTEXT
+ HKEYPERFORMANCENLSTEXT RegistryHive = "HKEY_PERFORMANCE_NLSTEXT"
+ // HKEYPERFORMANCETEXT HKEY_PERFORMANCE_TEXT
+ HKEYPERFORMANCETEXT RegistryHive = "HKEY_PERFORMANCE_TEXT"
+ // HKEYUSERS HKEY_USERS
+ HKEYUSERS RegistryHive = "HKEY_USERS"
+)
+
+// PossibleRegistryHiveValues returns an array of possible values for the RegistryHive const type.
+func PossibleRegistryHiveValues() []RegistryHive {
+ return []RegistryHive{HKEYA, HKEYCLASSESROOT, HKEYCURRENTCONFIG, HKEYCURRENTUSER, HKEYCURRENTUSERLOCALSETTINGS, HKEYLOCALMACHINE, HKEYPERFORMANCEDATA, HKEYPERFORMANCENLSTEXT, HKEYPERFORMANCETEXT, HKEYUSERS}
+}
+
+// RegistryValueKind enumerates the values for registry value kind.
+type RegistryValueKind string
+
+const (
+ // RegistryValueKindBinary Binary value type
+ RegistryValueKindBinary RegistryValueKind = "Binary"
+ // RegistryValueKindDWord DWord value type
+ RegistryValueKindDWord RegistryValueKind = "DWord"
+ // RegistryValueKindExpandString ExpandString value type
+ RegistryValueKindExpandString RegistryValueKind = "ExpandString"
+ // RegistryValueKindMultiString MultiString value type
+ RegistryValueKindMultiString RegistryValueKind = "MultiString"
+ // RegistryValueKindNone None
+ RegistryValueKindNone RegistryValueKind = "None"
+ // RegistryValueKindQWord QWord value type
+ RegistryValueKindQWord RegistryValueKind = "QWord"
+ // RegistryValueKindString String value type
+ RegistryValueKindString RegistryValueKind = "String"
+ // RegistryValueKindUnknown Unknown value type
+ RegistryValueKindUnknown RegistryValueKind = "Unknown"
+)
+
+// PossibleRegistryValueKindValues returns an array of possible values for the RegistryValueKind const type.
+func PossibleRegistryValueKindValues() []RegistryValueKind {
+ return []RegistryValueKind{RegistryValueKindBinary, RegistryValueKindDWord, RegistryValueKindExpandString, RegistryValueKindMultiString, RegistryValueKindNone, RegistryValueKindQWord, RegistryValueKindString, RegistryValueKindUnknown}
+}
+
// SettingKind enumerates the values for setting kind.
type SettingKind string
@@ -351,6 +752,23 @@ func PossibleStatusInMcasValues() []StatusInMcas {
return []StatusInMcas{StatusInMcasDisabled, StatusInMcasEnabled}
}
+// TemplateStatus enumerates the values for template status.
+type TemplateStatus string
+
+const (
+ // Available Alert rule template is available.
+ Available TemplateStatus = "Available"
+ // Installed Alert rule template installed. and can not use more then once
+	// Installed Alert rule template is installed and cannot be used more than once.
+ // NotAvailable Alert rule template is not available
+ NotAvailable TemplateStatus = "NotAvailable"
+)
+
+// PossibleTemplateStatusValues returns an array of possible values for the TemplateStatus const type.
+func PossibleTemplateStatusValues() []TemplateStatus {
+ return []TemplateStatus{Available, Installed, NotAvailable}
+}
+
// TriggerOperator enumerates the values for trigger operator.
type TriggerOperator string
@@ -698,7 +1116,7 @@ type AccountEntity struct {
Type *string `json:"type,omitempty"`
// Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile'
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
Kind KindBasicEntity `json:"kind,omitempty"`
}
@@ -730,6 +1148,66 @@ func (ae AccountEntity) AsFileEntity() (*FileEntity, bool) {
return nil, false
}
+// AsSecurityAlert is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
+}
+
+// AsFileHashEntity is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
+}
+
+// AsMalwareEntity is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for AccountEntity.
+func (ae AccountEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
// AsEntity is the BasicEntity implementation for AccountEntity.
func (ae AccountEntity) AsEntity() (*Entity, bool) {
return nil, false
@@ -818,8 +1296,22 @@ type AccountEntityProperties struct {
Puid *string `json:"puid,omitempty"`
// IsDomainJoined - READ-ONLY; Determines whether this is a domain account.
IsDomainJoined *bool `json:"isDomainJoined,omitempty"`
+ // DisplayName - READ-ONLY; The display name of the account.
+ DisplayName *string `json:"displayName,omitempty"`
// ObjectGUID - READ-ONLY; The objectGUID attribute is a single-value attribute that is the unique identifier for the object, assigned by active directory.
ObjectGUID *uuid.UUID `json:"objectGuid,omitempty"`
+ // HostEntityID - READ-ONLY; The id of the Host entity that contains the account, in case it is a local account (not domain joined)
+ HostEntityID *string `json:"hostEntityId,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short, human-readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for AccountEntityProperties.
+func (aep AccountEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
}
// Action action for alert rule.
@@ -1253,7 +1745,7 @@ func (ar AlertRule) AsBasicAlertRule() (BasicAlertRule, bool) {
// AlertRuleKind1 describes an Azure resource with kind.
type AlertRuleKind1 struct {
- // Kind - The kind of the alert rule. Possible values include: 'Scheduled'
+ // Kind - The kind of the alert rule. Possible values include: 'Scheduled', 'Filter', 'Fusion'
Kind AlertRuleKind `json:"kind,omitempty"`
}
@@ -1452,102 +1944,140 @@ func NewAlertRulesListPage(getNextPage func(context.Context, AlertRulesList) (Al
return AlertRulesListPage{fn: getNextPage}
}
-// AlertsDataTypeOfDataConnector alerts data type for data connectors.
-type AlertsDataTypeOfDataConnector struct {
- // Alerts - Alerts data type connection.
- Alerts *AlertsDataTypeOfDataConnectorAlerts `json:"alerts,omitempty"`
-}
-
-// AlertsDataTypeOfDataConnectorAlerts alerts data type connection.
-type AlertsDataTypeOfDataConnectorAlerts struct {
- // State - Describe whether this data type connection is enabled or not. Possible values include: 'Enabled', 'Disabled'
- State DataTypeState `json:"state,omitempty"`
+// BasicAlertRuleTemplate alert rule template.
+type BasicAlertRuleTemplate interface {
+ AsScheduledAlertRuleTemplate() (*ScheduledAlertRuleTemplate, bool)
+ AsFilterAlertRuleTemplate() (*FilterAlertRuleTemplate, bool)
+ AsFusionAlertRuleTemplate() (*FusionAlertRuleTemplate, bool)
+ AsAlertRuleTemplate() (*AlertRuleTemplate, bool)
}
-// ASCDataConnector represents ASC (Azure Security Center) data connector.
-type ASCDataConnector struct {
- // ASCDataConnectorProperties - ASC (Azure Security Center) data connector properties.
- *ASCDataConnectorProperties `json:"properties,omitempty"`
+// AlertRuleTemplate alert rule template.
+type AlertRuleTemplate struct {
+ autorest.Response `json:"-"`
// ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
// Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Etag - Etag of the data connector.
+ // Etag - Etag of the alert rule template.
Etag *string `json:"etag,omitempty"`
- // Kind - Possible values include: 'KindDataConnector', 'KindOffice365', 'KindThreatIntelligence', 'KindAmazonWebServicesCloudTrail', 'KindAzureActiveDirectory', 'KindAzureSecurityCenter', 'KindMicrosoftCloudAppSecurity', 'KindAzureAdvancedThreatProtection', 'KindMicrosoftDefenderAdvancedThreatProtection'
- Kind KindBasicDataConnector `json:"kind,omitempty"`
+ // Kind - Possible values include: 'KindBasicAlertRuleTemplateKindAlertRuleTemplate', 'KindBasicAlertRuleTemplateKindScheduled', 'KindBasicAlertRuleTemplateKindFilter', 'KindBasicAlertRuleTemplateKindFusion'
+ Kind KindBasicAlertRuleTemplate `json:"kind,omitempty"`
}
-// MarshalJSON is the custom marshaler for ASCDataConnector.
-func (adc ASCDataConnector) MarshalJSON() ([]byte, error) {
- adc.Kind = KindAzureSecurityCenter
- objectMap := make(map[string]interface{})
- if adc.ASCDataConnectorProperties != nil {
- objectMap["properties"] = adc.ASCDataConnectorProperties
+func unmarshalBasicAlertRuleTemplate(body []byte) (BasicAlertRuleTemplate, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
}
- if adc.Etag != nil {
- objectMap["etag"] = adc.Etag
+
+ switch m["kind"] {
+ case string(KindBasicAlertRuleTemplateKindScheduled):
+ var sart ScheduledAlertRuleTemplate
+ err := json.Unmarshal(body, &sart)
+ return sart, err
+ case string(KindBasicAlertRuleTemplateKindFilter):
+ var fart FilterAlertRuleTemplate
+ err := json.Unmarshal(body, &fart)
+ return fart, err
+ case string(KindBasicAlertRuleTemplateKindFusion):
+ var fart FusionAlertRuleTemplate
+ err := json.Unmarshal(body, &fart)
+ return fart, err
+ default:
+ var art AlertRuleTemplate
+ err := json.Unmarshal(body, &art)
+ return art, err
}
- if adc.Kind != "" {
- objectMap["kind"] = adc.Kind
+}
+func unmarshalBasicAlertRuleTemplateArray(body []byte) ([]BasicAlertRuleTemplate, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
}
- return json.Marshal(objectMap)
+
+ artArray := make([]BasicAlertRuleTemplate, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ art, err := unmarshalBasicAlertRuleTemplate(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ artArray[index] = art
+ }
+ return artArray, nil
}
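// Illustrative only: a minimal sketch of picking one concrete kind out of the
// polymorphic BasicAlertRuleTemplate values produced by the helpers above,
// e.g. the Value slice of an AlertRuleTemplatesList. The function name is
// hypothetical; each As* accessor returns a non-nil value only for the
// value's own kind.
func countScheduledTemplates(templates []BasicAlertRuleTemplate) int {
	n := 0
	for _, t := range templates {
		if _, ok := t.AsScheduledAlertRuleTemplate(); ok {
			n++
		}
	}
	return n
}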
-// AsOfficeDataConnector is the BasicDataConnector implementation for ASCDataConnector.
-func (adc ASCDataConnector) AsOfficeDataConnector() (*OfficeDataConnector, bool) {
- return nil, false
+// MarshalJSON is the custom marshaler for AlertRuleTemplate.
+func (art AlertRuleTemplate) MarshalJSON() ([]byte, error) {
+ art.Kind = KindBasicAlertRuleTemplateKindAlertRuleTemplate
+ objectMap := make(map[string]interface{})
+ if art.Etag != nil {
+ objectMap["etag"] = art.Etag
+ }
+ if art.Kind != "" {
+ objectMap["kind"] = art.Kind
+ }
+ return json.Marshal(objectMap)
}
-// AsTIDataConnector is the BasicDataConnector implementation for ASCDataConnector.
-func (adc ASCDataConnector) AsTIDataConnector() (*TIDataConnector, bool) {
+// AsScheduledAlertRuleTemplate is the BasicAlertRuleTemplate implementation for AlertRuleTemplate.
+func (art AlertRuleTemplate) AsScheduledAlertRuleTemplate() (*ScheduledAlertRuleTemplate, bool) {
return nil, false
}
-// AsAwsCloudTrailDataConnector is the BasicDataConnector implementation for ASCDataConnector.
-func (adc ASCDataConnector) AsAwsCloudTrailDataConnector() (*AwsCloudTrailDataConnector, bool) {
+// AsFilterAlertRuleTemplate is the BasicAlertRuleTemplate implementation for AlertRuleTemplate.
+func (art AlertRuleTemplate) AsFilterAlertRuleTemplate() (*FilterAlertRuleTemplate, bool) {
return nil, false
}
-// AsAADDataConnector is the BasicDataConnector implementation for ASCDataConnector.
-func (adc ASCDataConnector) AsAADDataConnector() (*AADDataConnector, bool) {
+// AsFusionAlertRuleTemplate is the BasicAlertRuleTemplate implementation for AlertRuleTemplate.
+func (art AlertRuleTemplate) AsFusionAlertRuleTemplate() (*FusionAlertRuleTemplate, bool) {
return nil, false
}
-// AsASCDataConnector is the BasicDataConnector implementation for ASCDataConnector.
-func (adc ASCDataConnector) AsASCDataConnector() (*ASCDataConnector, bool) {
- return &adc, true
+// AsAlertRuleTemplate is the BasicAlertRuleTemplate implementation for AlertRuleTemplate.
+func (art AlertRuleTemplate) AsAlertRuleTemplate() (*AlertRuleTemplate, bool) {
+ return &art, true
}
-// AsMCASDataConnector is the BasicDataConnector implementation for ASCDataConnector.
-func (adc ASCDataConnector) AsMCASDataConnector() (*MCASDataConnector, bool) {
- return nil, false
+// AsBasicAlertRuleTemplate is the BasicAlertRuleTemplate implementation for AlertRuleTemplate.
+func (art AlertRuleTemplate) AsBasicAlertRuleTemplate() (BasicAlertRuleTemplate, bool) {
+ return &art, true
}
-// AsAATPDataConnector is the BasicDataConnector implementation for ASCDataConnector.
-func (adc ASCDataConnector) AsAATPDataConnector() (*AATPDataConnector, bool) {
- return nil, false
+// AlertRuleTemplateModel a response wrapper carrying a single alert rule template.
+type AlertRuleTemplateModel struct {
+ autorest.Response `json:"-"`
+ Value BasicAlertRuleTemplate `json:"value,omitempty"`
}
-// AsMDATPDataConnector is the BasicDataConnector implementation for ASCDataConnector.
-func (adc ASCDataConnector) AsMDATPDataConnector() (*MDATPDataConnector, bool) {
- return nil, false
-}
+// UnmarshalJSON is the custom unmarshaler for AlertRuleTemplateModel struct.
+func (artm *AlertRuleTemplateModel) UnmarshalJSON(body []byte) error {
+ art, err := unmarshalBasicAlertRuleTemplate(body)
+ if err != nil {
+ return err
+ }
+ artm.Value = art
-// AsDataConnector is the BasicDataConnector implementation for ASCDataConnector.
-func (adc ASCDataConnector) AsDataConnector() (*DataConnector, bool) {
- return nil, false
+ return nil
}
-// AsBasicDataConnector is the BasicDataConnector implementation for ASCDataConnector.
-func (adc ASCDataConnector) AsBasicDataConnector() (BasicDataConnector, bool) {
- return &adc, true
+// AlertRuleTemplatesList list all the alert rule templates.
+type AlertRuleTemplatesList struct {
+ autorest.Response `json:"-"`
+ // NextLink - READ-ONLY; URL to fetch the next set of alert rule templates.
+ NextLink *string `json:"nextLink,omitempty"`
+ // Value - Array of alert rule templates.
+ Value *[]BasicAlertRuleTemplate `json:"value,omitempty"`
}
-// UnmarshalJSON is the custom unmarshaler for ASCDataConnector struct.
-func (adc *ASCDataConnector) UnmarshalJSON(body []byte) error {
+// UnmarshalJSON is the custom unmarshaler for AlertRuleTemplatesList struct.
+func (artl *AlertRuleTemplatesList) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
@@ -1555,44 +2085,307 @@ func (adc *ASCDataConnector) UnmarshalJSON(body []byte) error {
}
for k, v := range m {
switch k {
- case "properties":
- if v != nil {
- var aSCDataConnectorProperties ASCDataConnectorProperties
- err = json.Unmarshal(*v, &aSCDataConnectorProperties)
- if err != nil {
- return err
- }
- adc.ASCDataConnectorProperties = &aSCDataConnectorProperties
- }
- case "id":
+ case "nextLink":
if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
+ var nextLink string
+ err = json.Unmarshal(*v, &nextLink)
if err != nil {
return err
}
- adc.ID = &ID
+ artl.NextLink = &nextLink
}
- case "type":
+ case "value":
if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
+ value, err := unmarshalBasicAlertRuleTemplateArray(*v)
if err != nil {
return err
}
- adc.Type = &typeVar
+ artl.Value = &value
}
- case "name":
- if v != nil {
- var name string
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- adc.Name = &name
+ }
+ }
+
+ return nil
+}
+
+// AlertRuleTemplatesListIterator provides access to a complete listing of BasicAlertRuleTemplate values.
+type AlertRuleTemplatesListIterator struct {
+ i int
+ page AlertRuleTemplatesListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *AlertRuleTemplatesListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRuleTemplatesListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
}
- case "etag":
- if v != nil {
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *AlertRuleTemplatesListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter AlertRuleTemplatesListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter AlertRuleTemplatesListIterator) Response() AlertRuleTemplatesList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter AlertRuleTemplatesListIterator) Value() BasicAlertRuleTemplate {
+ if !iter.page.NotDone() {
+ return AlertRuleTemplate{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the AlertRuleTemplatesListIterator type.
+func NewAlertRuleTemplatesListIterator(page AlertRuleTemplatesListPage) AlertRuleTemplatesListIterator {
+ return AlertRuleTemplatesListIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (artl AlertRuleTemplatesList) IsEmpty() bool {
+ return artl.Value == nil || len(*artl.Value) == 0
+}
+
+// alertRuleTemplatesListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (artl AlertRuleTemplatesList) alertRuleTemplatesListPreparer(ctx context.Context) (*http.Request, error) {
+ if artl.NextLink == nil || len(to.String(artl.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(artl.NextLink)))
+}
+
+// AlertRuleTemplatesListPage contains a page of BasicAlertRuleTemplate values.
+type AlertRuleTemplatesListPage struct {
+ fn func(context.Context, AlertRuleTemplatesList) (AlertRuleTemplatesList, error)
+ artl AlertRuleTemplatesList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *AlertRuleTemplatesListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRuleTemplatesListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.artl)
+ if err != nil {
+ return err
+ }
+ page.artl = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AlertRuleTemplatesListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page AlertRuleTemplatesListPage) NotDone() bool {
+ return !page.artl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page AlertRuleTemplatesListPage) Response() AlertRuleTemplatesList {
+ return page.artl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page AlertRuleTemplatesListPage) Values() []BasicAlertRuleTemplate {
+ if page.artl.IsEmpty() {
+ return nil
+ }
+ return *page.artl.Value
+}
+
+// Creates a new instance of the AlertRuleTemplatesListPage type.
+func NewAlertRuleTemplatesListPage(getNextPage func(context.Context, AlertRuleTemplatesList) (AlertRuleTemplatesList, error)) AlertRuleTemplatesListPage {
+ return AlertRuleTemplatesListPage{fn: getNextPage}
+}
+
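// A minimal sketch of walking every page of alert rule templates with the
// pager defined above. Obtaining the first page from the generated client is
// out of scope here, so the page parameter and the helper name stand in for
// that call.
func collectAlertRuleTemplates(ctx context.Context, page AlertRuleTemplatesListPage) ([]BasicAlertRuleTemplate, error) {
	var all []BasicAlertRuleTemplate
	for page.NotDone() {
		all = append(all, page.Values()...)
		if err := page.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return all, nil
}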
+// AlertsDataTypeOfDataConnector alerts data type for data connectors.
+type AlertsDataTypeOfDataConnector struct {
+ // Alerts - Alerts data type connection.
+ Alerts *AlertsDataTypeOfDataConnectorAlerts `json:"alerts,omitempty"`
+}
+
+// AlertsDataTypeOfDataConnectorAlerts alerts data type connection.
+type AlertsDataTypeOfDataConnectorAlerts struct {
+ // State - Describe whether this data type connection is enabled or not. Possible values include: 'Enabled', 'Disabled'
+ State DataTypeState `json:"state,omitempty"`
+}
+
+// ASCDataConnector represents ASC (Azure Security Center) data connector.
+type ASCDataConnector struct {
+ // ASCDataConnectorProperties - ASC (Azure Security Center) data connector properties.
+ *ASCDataConnectorProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Etag - Etag of the data connector.
+ Etag *string `json:"etag,omitempty"`
+ // Kind - Possible values include: 'KindDataConnector', 'KindOffice365', 'KindThreatIntelligence', 'KindAmazonWebServicesCloudTrail', 'KindAzureActiveDirectory', 'KindAzureSecurityCenter', 'KindMicrosoftCloudAppSecurity', 'KindAzureAdvancedThreatProtection', 'KindMicrosoftDefenderAdvancedThreatProtection'
+ Kind KindBasicDataConnector `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ASCDataConnector.
+func (adc ASCDataConnector) MarshalJSON() ([]byte, error) {
+ adc.Kind = KindAzureSecurityCenter
+ objectMap := make(map[string]interface{})
+ if adc.ASCDataConnectorProperties != nil {
+ objectMap["properties"] = adc.ASCDataConnectorProperties
+ }
+ if adc.Etag != nil {
+ objectMap["etag"] = adc.Etag
+ }
+ if adc.Kind != "" {
+ objectMap["kind"] = adc.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsOfficeDataConnector is the BasicDataConnector implementation for ASCDataConnector.
+func (adc ASCDataConnector) AsOfficeDataConnector() (*OfficeDataConnector, bool) {
+ return nil, false
+}
+
+// AsTIDataConnector is the BasicDataConnector implementation for ASCDataConnector.
+func (adc ASCDataConnector) AsTIDataConnector() (*TIDataConnector, bool) {
+ return nil, false
+}
+
+// AsAwsCloudTrailDataConnector is the BasicDataConnector implementation for ASCDataConnector.
+func (adc ASCDataConnector) AsAwsCloudTrailDataConnector() (*AwsCloudTrailDataConnector, bool) {
+ return nil, false
+}
+
+// AsAADDataConnector is the BasicDataConnector implementation for ASCDataConnector.
+func (adc ASCDataConnector) AsAADDataConnector() (*AADDataConnector, bool) {
+ return nil, false
+}
+
+// AsASCDataConnector is the BasicDataConnector implementation for ASCDataConnector.
+func (adc ASCDataConnector) AsASCDataConnector() (*ASCDataConnector, bool) {
+ return &adc, true
+}
+
+// AsMCASDataConnector is the BasicDataConnector implementation for ASCDataConnector.
+func (adc ASCDataConnector) AsMCASDataConnector() (*MCASDataConnector, bool) {
+ return nil, false
+}
+
+// AsAATPDataConnector is the BasicDataConnector implementation for ASCDataConnector.
+func (adc ASCDataConnector) AsAATPDataConnector() (*AATPDataConnector, bool) {
+ return nil, false
+}
+
+// AsMDATPDataConnector is the BasicDataConnector implementation for ASCDataConnector.
+func (adc ASCDataConnector) AsMDATPDataConnector() (*MDATPDataConnector, bool) {
+ return nil, false
+}
+
+// AsDataConnector is the BasicDataConnector implementation for ASCDataConnector.
+func (adc ASCDataConnector) AsDataConnector() (*DataConnector, bool) {
+ return nil, false
+}
+
+// AsBasicDataConnector is the BasicDataConnector implementation for ASCDataConnector.
+func (adc ASCDataConnector) AsBasicDataConnector() (BasicDataConnector, bool) {
+ return &adc, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for ASCDataConnector struct.
+func (adc *ASCDataConnector) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var aSCDataConnectorProperties ASCDataConnectorProperties
+ err = json.Unmarshal(*v, &aSCDataConnectorProperties)
+ if err != nil {
+ return err
+ }
+ adc.ASCDataConnectorProperties = &aSCDataConnectorProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ adc.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ adc.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ adc.Name = &name
+ }
+ case "etag":
+ if v != nil {
var etag string
err = json.Unmarshal(*v, &etag)
if err != nil {
@@ -1795,46 +2588,252 @@ type AwsCloudTrailDataConnectorProperties struct {
DataTypes *AwsCloudTrailDataConnectorDataTypes `json:"dataTypes,omitempty"`
}
-// Bookmark represents a bookmark in Azure Security Insights.
-type Bookmark struct {
- autorest.Response `json:"-"`
- // Etag - Etag of the bookmark.
- Etag *string `json:"etag,omitempty"`
- // BookmarkProperties - Bookmark properties
- *BookmarkProperties `json:"properties,omitempty"`
+// AzureResourceEntity represents an azure resource entity.
+type AzureResourceEntity struct {
+ // AzureResourceEntityProperties - AzureResource entity properties
+ *AzureResourceEntityProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
// Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
}
-// MarshalJSON is the custom marshaler for Bookmark.
-func (b Bookmark) MarshalJSON() ([]byte, error) {
+// MarshalJSON is the custom marshaler for AzureResourceEntity.
+func (are AzureResourceEntity) MarshalJSON() ([]byte, error) {
+ are.Kind = KindAzureResource
objectMap := make(map[string]interface{})
- if b.Etag != nil {
- objectMap["etag"] = b.Etag
+ if are.AzureResourceEntityProperties != nil {
+ objectMap["properties"] = are.AzureResourceEntityProperties
}
- if b.BookmarkProperties != nil {
- objectMap["properties"] = b.BookmarkProperties
+ if are.Kind != "" {
+ objectMap["kind"] = are.Kind
}
return json.Marshal(objectMap)
}
-// UnmarshalJSON is the custom unmarshaler for Bookmark struct.
-func (b *Bookmark) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "etag":
- if v != nil {
- var etag string
- err = json.Unmarshal(*v, &etag)
+// AsAccountEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
+}
+
+// AsHostEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsHostEntity() (*HostEntity, bool) {
+ return nil, false
+}
+
+// AsFileEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsFileEntity() (*FileEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityAlert is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
+}
+
+// AsFileHashEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
+}
+
+// AsMalwareEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return &are, true
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for AzureResourceEntity.
+func (are AzureResourceEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &are, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for AzureResourceEntity struct.
+func (are *AzureResourceEntity) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var azureResourceEntityProperties AzureResourceEntityProperties
+ err = json.Unmarshal(*v, &azureResourceEntityProperties)
+ if err != nil {
+ return err
+ }
+ are.AzureResourceEntityProperties = &azureResourceEntityProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ are.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ are.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ are.Name = &name
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicEntity
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ are.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
+
+// AzureResourceEntityProperties azureResource entity property bag.
+type AzureResourceEntityProperties struct {
+ // ResourceID - READ-ONLY; The azure resource id of the resource
+ ResourceID *string `json:"resourceId,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short, human-readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for AzureResourceEntityProperties.
+func (arep AzureResourceEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// BaseAlertRuleTemplateProperties base alert rule template property bag.
+type BaseAlertRuleTemplateProperties struct {
+ // DisplayName - The display name for alert rule template.
+ DisplayName *string `json:"displayName,omitempty"`
+ // Description - The description of the alert rule template.
+ Description *string `json:"description,omitempty"`
+ // Tactics - The tactics of the alert rule template
+ Tactics *[]AttackTactic `json:"tactics,omitempty"`
+ // CreatedDateUTC - READ-ONLY; The time that this alert rule template has been added.
+ CreatedDateUTC *string `json:"createdDateUTC,omitempty"`
+ // Status - The alert rule template status. Possible values include: 'Installed', 'Available', 'NotAvailable'
+ Status TemplateStatus `json:"status,omitempty"`
+ // RequiredDataConnectors - The required data connectors for this template
+ RequiredDataConnectors *[]DataConnectorStatus `json:"requiredDataConnectors,omitempty"`
+ // AlertRulesCreatedByTemplateCount - the number of alert rules that were created by this template
+ AlertRulesCreatedByTemplateCount *int32 `json:"alertRulesCreatedByTemplateCount,omitempty"`
+}
+
+// Bookmark represents a bookmark in Azure Security Insights.
+type Bookmark struct {
+ autorest.Response `json:"-"`
+ // Etag - Etag of the bookmark.
+ Etag *string `json:"etag,omitempty"`
+ // BookmarkProperties - Bookmark properties
+ *BookmarkProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Bookmark.
+func (b Bookmark) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if b.Etag != nil {
+ objectMap["etag"] = b.Etag
+ }
+ if b.BookmarkProperties != nil {
+ objectMap["properties"] = b.BookmarkProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Bookmark struct.
+func (b *Bookmark) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
if err != nil {
return err
}
@@ -2032,10 +3031,10 @@ func NewBookmarkListPage(getNextPage func(context.Context, BookmarkList) (Bookma
type BookmarkProperties struct {
// DisplayName - The display name of the bookmark
DisplayName *string `json:"displayName,omitempty"`
- // LastUpdatedTimeUtc - The last time the bookmark was updated
- LastUpdatedTimeUtc *date.Time `json:"lastUpdatedTimeUtc,omitempty"`
- // CreatedTimeUtc - The time the bookmark was created
- CreatedTimeUtc *date.Time `json:"createdTimeUtc,omitempty"`
+ // Updated - The last time the bookmark was updated
+ Updated *date.Time `json:"updated,omitempty"`
+ // Created - The time the bookmark was created
+ Created *date.Time `json:"created,omitempty"`
// CreatedBy - Describes a user that created the bookmark
CreatedBy *UserInfo `json:"createdBy,omitempty"`
// UpdatedBy - Describes a user that updated the bookmark
@@ -2046,6 +3045,8 @@ type BookmarkProperties struct {
Labels *[]string `json:"labels,omitempty"`
// Query - The query of the bookmark.
Query *string `json:"query,omitempty"`
+ // QueryResult - The query result of the bookmark.
+ QueryResult *string `json:"queryResult,omitempty"`
}
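// A minimal sketch of the reshaped bookmark property bag: the timestamps now
// live in Created/Updated (formerly CreatedTimeUtc/LastUpdatedTimeUtc) and
// QueryResult is new. The helper name and all literal values below are
// placeholders.
func exampleBookmarkProperties(when date.Time) BookmarkProperties {
	return BookmarkProperties{
		DisplayName: to.StringPtr("Suspicious sign-ins"),
		Created:     &when,
		Updated:     &when,
		Query:       to.StringPtr("SigninLogs | where ResultType != 0"),
		QueryResult: to.StringPtr("(sample result)"),
	}
}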
// Case represents a case in Azure Security Insights.
@@ -2135,26 +3136,99 @@ func (c *Case) UnmarshalJSON(body []byte) error {
return nil
}
-// CaseList list all the cases.
-type CaseList struct {
+// CaseComment represents a case comment
+type CaseComment struct {
autorest.Response `json:"-"`
- // NextLink - READ-ONLY; URL to fetch the next set of cases.
+ // CaseCommentProperties - Case comment properties
+ *CaseCommentProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for CaseComment.
+func (cc CaseComment) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if cc.CaseCommentProperties != nil {
+ objectMap["properties"] = cc.CaseCommentProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for CaseComment struct.
+func (cc *CaseComment) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var caseCommentProperties CaseCommentProperties
+ err = json.Unmarshal(*v, &caseCommentProperties)
+ if err != nil {
+ return err
+ }
+ cc.CaseCommentProperties = &caseCommentProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ cc.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ cc.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ cc.Name = &name
+ }
+ }
+ }
+
+ return nil
+}
+
+// CaseCommentList list of case comments.
+type CaseCommentList struct {
+ autorest.Response `json:"-"`
+ // NextLink - READ-ONLY; URL to fetch the next set of comments.
NextLink *string `json:"nextLink,omitempty"`
- // Value - Array of cases.
- Value *[]Case `json:"value,omitempty"`
+ // Value - Array of comments.
+ Value *[]CaseComment `json:"value,omitempty"`
}
-// CaseListIterator provides access to a complete listing of Case values.
-type CaseListIterator struct {
+// CaseCommentListIterator provides access to a complete listing of CaseComment values.
+type CaseCommentListIterator struct {
i int
- page CaseListPage
+ page CaseCommentListPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *CaseListIterator) NextWithContext(ctx context.Context) (err error) {
+func (iter *CaseCommentListIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CaseListIterator.NextWithContext")
+ ctx = tracing.StartSpan(ctx, fqdn+"/CaseCommentListIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
@@ -2179,62 +3253,62 @@ func (iter *CaseListIterator) NextWithContext(ctx context.Context) (err error) {
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
-func (iter *CaseListIterator) Next() error {
+func (iter *CaseCommentListIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter CaseListIterator) NotDone() bool {
+func (iter CaseCommentListIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
-func (iter CaseListIterator) Response() CaseList {
+func (iter CaseCommentListIterator) Response() CaseCommentList {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
-func (iter CaseListIterator) Value() Case {
+func (iter CaseCommentListIterator) Value() CaseComment {
if !iter.page.NotDone() {
- return Case{}
+ return CaseComment{}
}
return iter.page.Values()[iter.i]
}
-// Creates a new instance of the CaseListIterator type.
-func NewCaseListIterator(page CaseListPage) CaseListIterator {
- return CaseListIterator{page: page}
+// Creates a new instance of the CaseCommentListIterator type.
+func NewCaseCommentListIterator(page CaseCommentListPage) CaseCommentListIterator {
+ return CaseCommentListIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
-func (cl CaseList) IsEmpty() bool {
- return cl.Value == nil || len(*cl.Value) == 0
+func (ccl CaseCommentList) IsEmpty() bool {
+ return ccl.Value == nil || len(*ccl.Value) == 0
}
-// caseListPreparer prepares a request to retrieve the next set of results.
+// caseCommentListPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (cl CaseList) caseListPreparer(ctx context.Context) (*http.Request, error) {
- if cl.NextLink == nil || len(to.String(cl.NextLink)) < 1 {
+func (ccl CaseCommentList) caseCommentListPreparer(ctx context.Context) (*http.Request, error) {
+ if ccl.NextLink == nil || len(to.String(ccl.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
- autorest.WithBaseURL(to.String(cl.NextLink)))
+ autorest.WithBaseURL(to.String(ccl.NextLink)))
}
-// CaseListPage contains a page of Case values.
-type CaseListPage struct {
- fn func(context.Context, CaseList) (CaseList, error)
- cl CaseList
+// CaseCommentListPage contains a page of CaseComment values.
+type CaseCommentListPage struct {
+ fn func(context.Context, CaseCommentList) (CaseCommentList, error)
+ ccl CaseCommentList
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *CaseListPage) NextWithContext(ctx context.Context) (err error) {
+func (page *CaseCommentListPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/CaseListPage.NextWithContext")
+ ctx = tracing.StartSpan(ctx, fqdn+"/CaseCommentListPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
@@ -2243,49 +3317,205 @@ func (page *CaseListPage) NextWithContext(ctx context.Context) (err error) {
tracing.EndSpan(ctx, sc, err)
}()
}
- next, err := page.fn(ctx, page.cl)
+ next, err := page.fn(ctx, page.ccl)
if err != nil {
return err
}
- page.cl = next
+ page.ccl = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
-func (page *CaseListPage) Next() error {
+func (page *CaseCommentListPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page CaseListPage) NotDone() bool {
- return !page.cl.IsEmpty()
+func (page CaseCommentListPage) NotDone() bool {
+ return !page.ccl.IsEmpty()
}
// Response returns the raw server response from the last page request.
-func (page CaseListPage) Response() CaseList {
- return page.cl
+func (page CaseCommentListPage) Response() CaseCommentList {
+ return page.ccl
}
// Values returns the slice of values for the current page or nil if there are no values.
-func (page CaseListPage) Values() []Case {
- if page.cl.IsEmpty() {
+func (page CaseCommentListPage) Values() []CaseComment {
+ if page.ccl.IsEmpty() {
return nil
}
- return *page.cl.Value
+ return *page.ccl.Value
}
-// Creates a new instance of the CaseListPage type.
-func NewCaseListPage(getNextPage func(context.Context, CaseList) (CaseList, error)) CaseListPage {
- return CaseListPage{fn: getNextPage}
+// Creates a new instance of the CaseCommentListPage type.
+func NewCaseCommentListPage(getNextPage func(context.Context, CaseCommentList) (CaseCommentList, error)) CaseCommentListPage {
+ return CaseCommentListPage{fn: getNextPage}
}
-// CaseProperties describes case properties
-type CaseProperties struct {
- // LastUpdatedTimeUtc - The last time the case was updated
- LastUpdatedTimeUtc *date.Time `json:"lastUpdatedTimeUtc,omitempty"`
- // CreatedTimeUtc - The time the case was created
+// CaseCommentProperties case comment property bag.
+type CaseCommentProperties struct {
+ // Message - The comment message
+ Message *string `json:"message,omitempty"`
+ // CreatedTimeUtc - READ-ONLY; The time the comment was created
+ CreatedTimeUtc *date.Time `json:"createdTimeUtc,omitempty"`
+ // UserInfo - READ-ONLY; Describes the user that created the comment
+ UserInfo *UserInfo `json:"userInfo,omitempty"`
+}
+
+// CaseList list all the cases.
+type CaseList struct {
+ autorest.Response `json:"-"`
+ // NextLink - READ-ONLY; URL to fetch the next set of cases.
+ NextLink *string `json:"nextLink,omitempty"`
+ // Value - Array of cases.
+ Value *[]Case `json:"value,omitempty"`
+}
+
+// CaseListIterator provides access to a complete listing of Case values.
+type CaseListIterator struct {
+ i int
+ page CaseListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *CaseListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CaseListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *CaseListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter CaseListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter CaseListIterator) Response() CaseList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter CaseListIterator) Value() Case {
+ if !iter.page.NotDone() {
+ return Case{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the CaseListIterator type.
+func NewCaseListIterator(page CaseListPage) CaseListIterator {
+ return CaseListIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (cl CaseList) IsEmpty() bool {
+ return cl.Value == nil || len(*cl.Value) == 0
+}
+
+// caseListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (cl CaseList) caseListPreparer(ctx context.Context) (*http.Request, error) {
+ if cl.NextLink == nil || len(to.String(cl.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(cl.NextLink)))
+}
+
+// CaseListPage contains a page of Case values.
+type CaseListPage struct {
+ fn func(context.Context, CaseList) (CaseList, error)
+ cl CaseList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *CaseListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/CaseListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.cl)
+ if err != nil {
+ return err
+ }
+ page.cl = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *CaseListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page CaseListPage) NotDone() bool {
+ return !page.cl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page CaseListPage) Response() CaseList {
+ return page.cl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page CaseListPage) Values() []Case {
+ if page.cl.IsEmpty() {
+ return nil
+ }
+ return *page.cl.Value
+}
+
+// Creates a new instance of the CaseListPage type.
+func NewCaseListPage(getNextPage func(context.Context, CaseList) (CaseList, error)) CaseListPage {
+ return CaseListPage{fn: getNextPage}
+}
+
+// CaseProperties describes case properties
+type CaseProperties struct {
+ // LastUpdatedTimeUtc - READ-ONLY; The last time the case was updated
+ LastUpdatedTimeUtc *date.Time `json:"lastUpdatedTimeUtc,omitempty"`
+ // CreatedTimeUtc - READ-ONLY; The time the case was created
CreatedTimeUtc *date.Time `json:"createdTimeUtc,omitempty"`
// EndTimeUtc - The end time of the case
EndTimeUtc *date.Time `json:"endTimeUtc,omitempty"`
@@ -2297,14 +3527,24 @@ type CaseProperties struct {
Description *string `json:"description,omitempty"`
// Title - The title of the case
Title *string `json:"title,omitempty"`
- // AssignedTo - Describes a user that the case is assigned to
- AssignedTo *UserInfo `json:"assignedTo,omitempty"`
+ // Owner - Describes a user that the case is assigned to
+ Owner *UserInfo `json:"owner,omitempty"`
// Severity - The severity of the case. Possible values include: 'CaseSeverityCritical', 'CaseSeverityHigh', 'CaseSeverityMedium', 'CaseSeverityLow', 'CaseSeverityInformational'
Severity CaseSeverity `json:"severity,omitempty"`
// Status - The status of the case. Possible values include: 'CaseStatusDraft', 'CaseStatusNew', 'CaseStatusInProgress', 'CaseStatusClosed'
Status CaseStatus `json:"status,omitempty"`
- // CloseReason - The reason the case was closed. Possible values include: 'Resolved', 'Dismissed', 'Other'
+ // CloseReason - The reason the case was closed. Possible values include: 'Resolved', 'Dismissed', 'TruePositive', 'FalsePositive', 'Other'
CloseReason CloseReason `json:"closeReason,omitempty"`
+ // ClosedReasonText - the case close reason details
+ ClosedReasonText *string `json:"closedReasonText,omitempty"`
+ // RelatedAlertIds - READ-ONLY; List of related alert identifiers
+ RelatedAlertIds *[]string `json:"relatedAlertIds,omitempty"`
+ // CaseNumber - READ-ONLY; a sequential number
+ CaseNumber *int32 `json:"caseNumber,omitempty"`
+ // LastComment - READ-ONLY; the last comment in the case
+ LastComment *string `json:"lastComment,omitempty"`
+ // TotalComments - READ-ONLY; the number of total comments in the case
+ TotalComments *int32 `json:"totalComments,omitempty"`
}
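// A minimal sketch of the reworked case property bag: the assignee moved from
// AssignedTo to Owner, and CloseReason gains TruePositive/FalsePositive
// values. The helper name and all literal values are placeholders.
func exampleClosedCase(owner UserInfo) CaseProperties {
	return CaseProperties{
		Title:            to.StringPtr("Possible credential theft"),
		Severity:         CaseSeverityHigh,
		Status:           CaseStatusClosed,
		Owner:            &owner,
		CloseReason:      FalsePositive,
		ClosedReasonText: to.StringPtr("Confirmed as routine admin activity"),
	}
}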
// CasesAggregation represents aggregations results for cases.
@@ -2443,23 +3683,120 @@ type CasesAggregationProperties struct {
AggregationByStatus *CasesAggregationByStatusProperties `json:"aggregationByStatus,omitempty"`
}
-// CloudError error response structure.
-type CloudError struct {
- // CloudErrorBody - Error data
- *CloudErrorBody `json:"error,omitempty"`
+// CloudApplicationEntity represents a cloud application entity.
+type CloudApplicationEntity struct {
+ // CloudApplicationEntityProperties - CloudApplication entity properties
+ *CloudApplicationEntityProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
}
-// MarshalJSON is the custom marshaler for CloudError.
-func (ce CloudError) MarshalJSON() ([]byte, error) {
+// MarshalJSON is the custom marshaler for CloudApplicationEntity.
+func (cae CloudApplicationEntity) MarshalJSON() ([]byte, error) {
+ cae.Kind = KindCloudApplication
objectMap := make(map[string]interface{})
- if ce.CloudErrorBody != nil {
- objectMap["error"] = ce.CloudErrorBody
+ if cae.CloudApplicationEntityProperties != nil {
+ objectMap["properties"] = cae.CloudApplicationEntityProperties
+ }
+ if cae.Kind != "" {
+ objectMap["kind"] = cae.Kind
}
return json.Marshal(objectMap)
}
-// UnmarshalJSON is the custom unmarshaler for CloudError struct.
-func (ce *CloudError) UnmarshalJSON(body []byte) error {
+// AsAccountEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
+}
+
+// AsHostEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsHostEntity() (*HostEntity, bool) {
+ return nil, false
+}
+
+// AsFileEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsFileEntity() (*FileEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityAlert is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
+}
+
+// AsFileHashEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
+}
+
+// AsMalwareEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return &cae, true
+}
+
+// AsProcessEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for CloudApplicationEntity.
+func (cae CloudApplicationEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &cae, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for CloudApplicationEntity struct.
+func (cae *CloudApplicationEntity) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
@@ -2467,14 +3804,109 @@ func (ce *CloudError) UnmarshalJSON(body []byte) error {
}
for k, v := range m {
switch k {
- case "error":
+ case "properties":
if v != nil {
- var cloudErrorBody CloudErrorBody
- err = json.Unmarshal(*v, &cloudErrorBody)
+ var cloudApplicationEntityProperties CloudApplicationEntityProperties
+ err = json.Unmarshal(*v, &cloudApplicationEntityProperties)
if err != nil {
return err
}
- ce.CloudErrorBody = &cloudErrorBody
+ cae.CloudApplicationEntityProperties = &cloudApplicationEntityProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ cae.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ cae.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ cae.Name = &name
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicEntity
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ cae.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
+
+// CloudApplicationEntityProperties cloudApplication entity property bag.
+type CloudApplicationEntityProperties struct {
+ // AppID - READ-ONLY; The technical identifier of the application.
+ AppID *int32 `json:"appId,omitempty"`
+ // AppName - READ-ONLY; The name of the related cloud application.
+ AppName *string `json:"appName,omitempty"`
+ // InstanceName - READ-ONLY; The user defined instance name of the cloud application. It is often used to distinguish between several applications of the same type that a customer has.
+ InstanceName *string `json:"instanceName,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short, human-readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for CloudApplicationEntityProperties.
+func (caep CloudApplicationEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// CloudError error response structure.
+type CloudError struct {
+ // CloudErrorBody - Error data
+ *CloudErrorBody `json:"error,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for CloudError.
+func (ce CloudError) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ce.CloudErrorBody != nil {
+ objectMap["error"] = ce.CloudErrorBody
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for CloudError struct.
+func (ce *CloudError) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "error":
+ if v != nil {
+ var cloudErrorBody CloudErrorBody
+ err = json.Unmarshal(*v, &cloudErrorBody)
+ if err != nil {
+ return err
+ }
+ ce.CloudErrorBody = &cloudErrorBody
}
}
}
@@ -2654,7 +4086,7 @@ type DataConnectorDataTypeCommon struct {
// DataConnectorKind1 describes an Azure resource with kind.
type DataConnectorKind1 struct {
- // Kind - The kind of the data connector. Possible values include: 'AzureActiveDirectory', 'AzureSecurityCenter', 'MicrosoftCloudAppSecurity', 'ThreatIntelligence', 'Office365', 'AmazonWebServicesCloudTrail', 'AzureAdvancedThreatProtection', 'MicrosoftDefenderAdvancedThreatProtection'
+ // Kind - The kind of the data connector. Possible values include: 'DataConnectorKindAzureActiveDirectory', 'DataConnectorKindAzureSecurityCenter', 'DataConnectorKindMicrosoftCloudAppSecurity', 'DataConnectorKindThreatIntelligence', 'DataConnectorKindOffice365', 'DataConnectorKindAmazonWebServicesCloudTrail', 'DataConnectorKindAzureAdvancedThreatProtection', 'DataConnectorKindMicrosoftDefenderAdvancedThreatProtection'
Kind DataConnectorKind `json:"kind,omitempty"`
}
@@ -2853,6 +4285,26 @@ func (dcm *DataConnectorModel) UnmarshalJSON(body []byte) error {
return nil
}
+// DataConnectorStatus alert rule template data connector status
+type DataConnectorStatus struct {
+ // ConnectorID - the connector id
+ ConnectorID *string `json:"connectorId,omitempty"`
+ // DataTypes - The data types availability map
+ DataTypes map[string]*DataTypeStatus `json:"dataTypes"`
+}
+
+// MarshalJSON is the custom marshaler for DataConnectorStatus.
+func (dcs DataConnectorStatus) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if dcs.ConnectorID != nil {
+ objectMap["connectorId"] = dcs.ConnectorID
+ }
+ if dcs.DataTypes != nil {
+ objectMap["dataTypes"] = dcs.DataTypes
+ }
+ return json.Marshal(objectMap)
+}
+
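The custom marshaler above drops whichever field is nil, so a status carrying only a connector id serializes to a single-key object. A hypothetical helper under the same in-package assumption as the sketch earlier:

```go
// encodeConnectorStatus is a hypothetical helper, shown only to illustrate
// DataConnectorStatus.MarshalJSON above: with DataTypes left nil, the output
// contains just the "connectorId" key.
func encodeConnectorStatus(connectorID string) ([]byte, error) {
	dcs := DataConnectorStatus{ConnectorID: &connectorID}
	return json.Marshal(dcs)
}
```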
// DataConnectorTenantID properties data connector on tenant level.
type DataConnectorTenantID struct {
// TenantID - The tenant id to connect to, and get the data from.
@@ -2865,11 +4317,217 @@ type DataConnectorWithAlertsProperties struct {
DataTypes *AlertsDataTypeOfDataConnector `json:"dataTypes,omitempty"`
}
+// DNSEntity represents a dns entity.
+type DNSEntity struct {
+ // DNSEntityProperties - Dns entity properties
+ *DNSEntityProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for DNSEntity.
+func (de DNSEntity) MarshalJSON() ([]byte, error) {
+ de.Kind = KindDNSResolution
+ objectMap := make(map[string]interface{})
+ if de.DNSEntityProperties != nil {
+ objectMap["properties"] = de.DNSEntityProperties
+ }
+ if de.Kind != "" {
+ objectMap["kind"] = de.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAccountEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
+}
+
+// AsHostEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsHostEntity() (*HostEntity, bool) {
+ return nil, false
+}
+
+// AsFileEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsFileEntity() (*FileEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityAlert is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
+}
+
+// AsFileHashEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
+}
+
+// AsMalwareEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return &de, true
+}
+
+// AsIPEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for DNSEntity.
+func (de DNSEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &de, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for DNSEntity struct.
+func (de *DNSEntity) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var DNSEntityProperties DNSEntityProperties
+ err = json.Unmarshal(*v, &DNSEntityProperties)
+ if err != nil {
+ return err
+ }
+ de.DNSEntityProperties = &DNSEntityProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ de.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ de.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ de.Name = &name
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicEntity
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ de.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
+
+// DNSEntityProperties dns entity property bag.
+type DNSEntityProperties struct {
+ // DomainName - READ-ONLY; The name of the dns record associated with the alert
+ DomainName *string `json:"domainName,omitempty"`
+ // IPAddressEntityIds - READ-ONLY; Ip entity identifiers for the resolved ip address.
+ IPAddressEntityIds *[]string `json:"ipAddressEntityIds,omitempty"`
+ // DNSServerIPEntityID - READ-ONLY; An ip entity id for the dns server resolving the request
+ DNSServerIPEntityID *string `json:"dnsServerIpEntityId,omitempty"`
+ // HostIPAddressEntityID - READ-ONLY; An ip entity id for the dns request client
+ HostIPAddressEntityID *string `json:"hostIpAddressEntityId,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name which is a short humanly readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for DNSEntityProperties.
+func (dep DNSEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// BasicEntity specific entity.
type BasicEntity interface {
AsAccountEntity() (*AccountEntity, bool)
AsHostEntity() (*HostEntity, bool)
AsFileEntity() (*FileEntity, bool)
+ AsSecurityAlert() (*SecurityAlert, bool)
+ AsFileHashEntity() (*FileHashEntity, bool)
+ AsMalwareEntity() (*MalwareEntity, bool)
+ AsSecurityGroupEntity() (*SecurityGroupEntity, bool)
+ AsAzureResourceEntity() (*AzureResourceEntity, bool)
+ AsCloudApplicationEntity() (*CloudApplicationEntity, bool)
+ AsProcessEntity() (*ProcessEntity, bool)
+ AsDNSEntity() (*DNSEntity, bool)
+ AsIPEntity() (*IPEntity, bool)
+ AsRegistryKeyEntity() (*RegistryKeyEntity, bool)
+ AsRegistryValueEntity() (*RegistryValueEntity, bool)
+ AsURLEntity() (*URLEntity, bool)
AsEntity() (*Entity, bool)
}
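BasicEntity is the discriminated union over all entity kinds: each concrete type implements every As... accessor and returns (value, true) only for its own kind, as the implementations above and below show. A hypothetical sketch of the dispatch idiom, again assumed to sit in this package:

```go
// describeEntity is a hypothetical example of the As... dispatch idiom; it is
// not part of the generated SDK. Exactly one As<Kind> accessor reports ok for
// a given concrete entity.
func describeEntity(e BasicEntity) string {
	if dns, ok := e.AsDNSEntity(); ok && dns.DNSEntityProperties != nil && dns.DomainName != nil {
		return "dns: " + *dns.DomainName
	}
	if app, ok := e.AsCloudApplicationEntity(); ok && app.CloudApplicationEntityProperties != nil && app.AppName != nil {
		return "cloudApp: " + *app.AppName
	}
	if _, ok := e.AsHostEntity(); ok {
		return "host"
	}
	return "other"
}
```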
@@ -2882,7 +4540,7 @@ type Entity struct {
Type *string `json:"type,omitempty"`
// Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile'
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
Kind KindBasicEntity `json:"kind,omitempty"`
}
@@ -2906,6 +4564,54 @@ func unmarshalBasicEntity(body []byte) (BasicEntity, error) {
var fe FileEntity
err := json.Unmarshal(body, &fe)
return fe, err
+ case string(KindSecurityAlert):
+ var sa SecurityAlert
+ err := json.Unmarshal(body, &sa)
+ return sa, err
+ case string(KindFileHash):
+ var fhe FileHashEntity
+ err := json.Unmarshal(body, &fhe)
+ return fhe, err
+ case string(KindMalware):
+ var me MalwareEntity
+ err := json.Unmarshal(body, &me)
+ return me, err
+ case string(KindSecurityGroup):
+ var sge SecurityGroupEntity
+ err := json.Unmarshal(body, &sge)
+ return sge, err
+ case string(KindAzureResource):
+ var are AzureResourceEntity
+ err := json.Unmarshal(body, &are)
+ return are, err
+ case string(KindCloudApplication):
+ var cae CloudApplicationEntity
+ err := json.Unmarshal(body, &cae)
+ return cae, err
+ case string(KindProcess):
+ var peVar ProcessEntity
+ err := json.Unmarshal(body, &peVar)
+ return peVar, err
+ case string(KindDNSResolution):
+ var de DNSEntity
+ err := json.Unmarshal(body, &de)
+ return de, err
+ case string(KindIP):
+ var ie IPEntity
+ err := json.Unmarshal(body, &ie)
+ return ie, err
+ case string(KindRegistryKey):
+ var rke RegistryKeyEntity
+ err := json.Unmarshal(body, &rke)
+ return rke, err
+ case string(KindRegistryValue):
+ var rve RegistryValueEntity
+ err := json.Unmarshal(body, &rve)
+ return rve, err
+ case string(KindURL):
+ var ue URLEntity
+ err := json.Unmarshal(body, &ue)
+ return ue, err
default:
var e Entity
err := json.Unmarshal(body, &e)
@@ -2956,6 +4662,66 @@ func (e Entity) AsFileEntity() (*FileEntity, bool) {
return nil, false
}
+// AsSecurityAlert is the BasicEntity implementation for Entity.
+func (e Entity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
+}
+
+// AsFileHashEntity is the BasicEntity implementation for Entity.
+func (e Entity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
+}
+
+// AsMalwareEntity is the BasicEntity implementation for Entity.
+func (e Entity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for Entity.
+func (e Entity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for Entity.
+func (e Entity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for Entity.
+func (e Entity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for Entity.
+func (e Entity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for Entity.
+func (e Entity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for Entity.
+func (e Entity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for Entity.
+func (e Entity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for Entity.
+func (e Entity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for Entity.
+func (e Entity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
// AsEntity is the BasicEntity implementation for Entity.
func (e Entity) AsEntity() (*Entity, bool) {
return &e, true
@@ -2966,9 +4732,71 @@ func (e Entity) AsBasicEntity() (BasicEntity, bool) {
return &e, true
}
-// EntityKind1 describes an Azure resource with kind.
+// EntityCommonProperties entity common property bag.
+type EntityCommonProperties struct {
+ // FriendlyName - READ-ONLY; The graph item display name which is a short humanly readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for EntityCommonProperties.
+func (ecp EntityCommonProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// EntityExpandParameters the parameters required to execute an expand operation on the given entity.
+type EntityExpandParameters struct {
+ // ExpansionID - The Id of the expansion to perform.
+ ExpansionID *uuid.UUID `json:"expansionId,omitempty"`
+ // StartTime - The start date filter, so the only expansion results returned are after this date.
+ StartTime *date.Time `json:"startTime,omitempty"`
+ // EndTime - The end date filter, so the only expansion results returned are before this date.
+ EndTime *date.Time `json:"endTime,omitempty"`
+}
+
+// EntityExpandResponse the entity expansion result operation response.
+type EntityExpandResponse struct {
+ autorest.Response `json:"-"`
+ // Value - The expansion result values.
+ Value *EntityExpandResponseValue `json:"value,omitempty"`
+ // MetaData - The metadata from the expansion operation results.
+ MetaData *ExpansionResultsMetadata `json:"metaData,omitempty"`
+}
+
+// EntityExpandResponseValue the expansion result values.
+type EntityExpandResponseValue struct {
+ // Entities - Array of the expansion result entities.
+ Entities *[]BasicEntity `json:"entities,omitempty"`
+}
+
+// UnmarshalJSON is the custom unmarshaler for EntityExpandResponseValue struct.
+func (eer *EntityExpandResponseValue) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "entities":
+ if v != nil {
+ entities, err := unmarshalBasicEntityArray(*v)
+ if err != nil {
+ return err
+ }
+ eer.Entities = &entities
+ }
+ }
+ }
+
+ return nil
+}
+
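EntityExpandResponseValue carries the polymorphic expansion results: its custom unmarshaler above rebuilds each element through unmarshalBasicEntityArray, so callers receive ready-to-dispatch BasicEntity values. A hypothetical walk over an expansion response, same in-package assumption:

```go
// collectDomains is a hypothetical helper that pulls DNS domain names out of
// an expansion result. It is illustration only, not part of the generated SDK.
func collectDomains(resp EntityExpandResponse) []string {
	var domains []string
	if resp.Value == nil || resp.Value.Entities == nil {
		return domains
	}
	for _, entity := range *resp.Value.Entities {
		// Each element is a BasicEntity; dispatch on the concrete kind.
		if dns, ok := entity.AsDNSEntity(); ok && dns.DNSEntityProperties != nil && dns.DomainName != nil {
			domains = append(domains, *dns.DomainName)
		}
	}
	return domains
}
```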
+// EntityKind1 describes an entity with kind.
type EntityKind1 struct {
- // Kind - The kind of the entity. Possible values include: 'Account', 'Host', 'File'
+ // Kind - The kind of the entity. Possible values include: 'EntityKindAccount', 'EntityKindHost', 'EntityKindFile', 'EntityKindAzureResource', 'EntityKindCloudApplication', 'EntityKindDNSResolution', 'EntityKindFileHash', 'EntityKindIP', 'EntityKindMalware', 'EntityKindProcess', 'EntityKindRegistryKey', 'EntityKindRegistryValue', 'EntityKindSecurityGroup', 'EntityKindURL', 'EntityKindSecurityAlert', 'EntityKindBookmark'
Kind EntityKind `json:"kind,omitempty"`
}
@@ -3390,18 +5218,36 @@ func NewEntityQueryListPage(getNextPage func(context.Context, EntityQueryList) (
type EntityQueryProperties struct {
// QueryTemplate - The template query string to be parsed and formatted
QueryTemplate *string `json:"queryTemplate,omitempty"`
- // InputEntityType - The type of the query's source entity
- InputEntityType *string `json:"inputEntityType,omitempty"`
+ // InputEntityType - The type of the query's source entity. Possible values include: 'EntityTypeAccount', 'EntityTypeHost', 'EntityTypeFile', 'EntityTypeAzureResource', 'EntityTypeCloudApplication', 'EntityTypeDNS', 'EntityTypeFileHash', 'EntityTypeIP', 'EntityTypeMalware', 'EntityTypeProcess', 'EntityTypeRegistryKey', 'EntityTypeRegistryValue', 'EntityTypeSecurityGroup', 'EntityTypeURL', 'EntityTypeSecurityAlert', 'EntityTypeHuntingBookmark'
+ InputEntityType EntityType `json:"inputEntityType,omitempty"`
// InputFields - List of the fields of the source entity that are required to run the query
InputFields *[]string `json:"inputFields,omitempty"`
// OutputEntityTypes - List of the desired output types to be constructed from the result
- OutputEntityTypes *[]string `json:"outputEntityTypes,omitempty"`
+ OutputEntityTypes *[]EntityType `json:"outputEntityTypes,omitempty"`
// DataSources - List of the data sources that are required to run the query
DataSources *[]string `json:"dataSources,omitempty"`
// DisplayName - The query display name
DisplayName *string `json:"displayName,omitempty"`
}
+// ExpansionResultAggregation information of a specific aggregation in the expansion result.
+type ExpansionResultAggregation struct {
+ // EntityKind - The kind of the aggregated entity. Possible values include: 'EntityKindAccount', 'EntityKindHost', 'EntityKindFile', 'EntityKindAzureResource', 'EntityKindCloudApplication', 'EntityKindDNSResolution', 'EntityKindFileHash', 'EntityKindIP', 'EntityKindMalware', 'EntityKindProcess', 'EntityKindRegistryKey', 'EntityKindRegistryValue', 'EntityKindSecurityGroup', 'EntityKindURL', 'EntityKindSecurityAlert', 'EntityKindBookmark'
+ EntityKind EntityKind `json:"entityKind,omitempty"`
+ // Count - Total number of aggregations of the given kind (and aggregationType if given) in the expansion result.
+ Count *int32 `json:"count,omitempty"`
+ // AggregationType - The common type of the aggregation (e.g. an entity field name).

+ AggregationType *string `json:"aggregationType,omitempty"`
+ // DisplayName - The display name of the aggregation by type.
+ DisplayName *string `json:"displayName,omitempty"`
+}
+
+// ExpansionResultsMetadata expansion result metadata.
+type ExpansionResultsMetadata struct {
+ // Aggregations - Information of the aggregated nodes in the expansion result.
+ Aggregations *[]ExpansionResultAggregation `json:"aggregations,omitempty"`
+}
+
// FileEntity represents a file entity.
type FileEntity struct {
// FileEntityProperties - File entity properties
@@ -3412,7 +5258,7 @@ type FileEntity struct {
Type *string `json:"type,omitempty"`
// Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile'
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
Kind KindBasicEntity `json:"kind,omitempty"`
}
@@ -3444,22 +5290,82 @@ func (fe FileEntity) AsFileEntity() (*FileEntity, bool) {
return &fe, true
}
-// AsEntity is the BasicEntity implementation for FileEntity.
-func (fe FileEntity) AsEntity() (*Entity, bool) {
+// AsSecurityAlert is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsSecurityAlert() (*SecurityAlert, bool) {
return nil, false
}
-// AsBasicEntity is the BasicEntity implementation for FileEntity.
-func (fe FileEntity) AsBasicEntity() (BasicEntity, bool) {
- return &fe, true
+// AsFileHashEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
}
-// UnmarshalJSON is the custom unmarshaler for FileEntity struct.
-func (fe *FileEntity) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
+// AsMalwareEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for FileEntity.
+func (fe FileEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &fe, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for FileEntity struct.
+func (fe *FileEntity) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
}
for k, v := range m {
switch k {
@@ -3520,62 +5426,136 @@ type FileEntityProperties struct {
Directory *string `json:"directory,omitempty"`
// FileName - READ-ONLY; The file name without path (some alerts might not include path).
FileName *string `json:"fileName,omitempty"`
+ // HostEntityID - READ-ONLY; The Host entity id which the file belongs to
+ HostEntityID *string `json:"hostEntityId,omitempty"`
+ // FileHashEntityIds - READ-ONLY; The file hash entity identifiers associated with this file
+ FileHashEntityIds *[]string `json:"fileHashEntityIds,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name which is a short humanly readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for FileEntityProperties.
+func (fep FileEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
}
-// HostEntity represents a host entity.
-type HostEntity struct {
- // HostEntityProperties - Host entity properties
- *HostEntityProperties `json:"properties,omitempty"`
+// FileHashEntity represents a file hash entity.
+type FileHashEntity struct {
+ // FileHashEntityProperties - FileHash entity properties
+ *FileHashEntityProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
// Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile'
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
Kind KindBasicEntity `json:"kind,omitempty"`
}
-// MarshalJSON is the custom marshaler for HostEntity.
-func (he HostEntity) MarshalJSON() ([]byte, error) {
- he.Kind = KindHost
+// MarshalJSON is the custom marshaler for FileHashEntity.
+func (fhe FileHashEntity) MarshalJSON() ([]byte, error) {
+ fhe.Kind = KindFileHash
objectMap := make(map[string]interface{})
- if he.HostEntityProperties != nil {
- objectMap["properties"] = he.HostEntityProperties
+ if fhe.FileHashEntityProperties != nil {
+ objectMap["properties"] = fhe.FileHashEntityProperties
}
- if he.Kind != "" {
- objectMap["kind"] = he.Kind
+ if fhe.Kind != "" {
+ objectMap["kind"] = fhe.Kind
}
return json.Marshal(objectMap)
}
-// AsAccountEntity is the BasicEntity implementation for HostEntity.
-func (he HostEntity) AsAccountEntity() (*AccountEntity, bool) {
+// AsAccountEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsAccountEntity() (*AccountEntity, bool) {
return nil, false
}
-// AsHostEntity is the BasicEntity implementation for HostEntity.
-func (he HostEntity) AsHostEntity() (*HostEntity, bool) {
- return &he, true
+// AsHostEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsHostEntity() (*HostEntity, bool) {
+ return nil, false
}
-// AsFileEntity is the BasicEntity implementation for HostEntity.
-func (he HostEntity) AsFileEntity() (*FileEntity, bool) {
+// AsFileEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsFileEntity() (*FileEntity, bool) {
return nil, false
}
-// AsEntity is the BasicEntity implementation for HostEntity.
-func (he HostEntity) AsEntity() (*Entity, bool) {
+// AsSecurityAlert is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsSecurityAlert() (*SecurityAlert, bool) {
return nil, false
}
-// AsBasicEntity is the BasicEntity implementation for HostEntity.
-func (he HostEntity) AsBasicEntity() (BasicEntity, bool) {
- return &he, true
+// AsFileHashEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return &fhe, true
}
-// UnmarshalJSON is the custom unmarshaler for HostEntity struct.
-func (he *HostEntity) UnmarshalJSON(body []byte) error {
+// AsMalwareEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for FileHashEntity.
+func (fhe FileHashEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &fhe, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for FileHashEntity struct.
+func (fhe *FileHashEntity) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
@@ -3585,12 +5565,12 @@ func (he *HostEntity) UnmarshalJSON(body []byte) error {
switch k {
case "properties":
if v != nil {
- var hostEntityProperties HostEntityProperties
- err = json.Unmarshal(*v, &hostEntityProperties)
+ var fileHashEntityProperties FileHashEntityProperties
+ err = json.Unmarshal(*v, &fileHashEntityProperties)
if err != nil {
return err
}
- he.HostEntityProperties = &hostEntityProperties
+ fhe.FileHashEntityProperties = &fileHashEntityProperties
}
case "id":
if v != nil {
@@ -3599,7 +5579,7 @@ func (he *HostEntity) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- he.ID = &ID
+ fhe.ID = &ID
}
case "type":
if v != nil {
@@ -3608,7 +5588,7 @@ func (he *HostEntity) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- he.Type = &typeVar
+ fhe.Type = &typeVar
}
case "name":
if v != nil {
@@ -3617,7 +5597,7 @@ func (he *HostEntity) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- he.Name = &name
+ fhe.Name = &name
}
case "kind":
if v != nil {
@@ -3626,7 +5606,7 @@ func (he *HostEntity) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- he.Kind = kind
+ fhe.Kind = kind
}
}
}
@@ -3634,112 +5614,83 @@ func (he *HostEntity) UnmarshalJSON(body []byte) error {
return nil
}
-// HostEntityProperties host entity property bag.
-type HostEntityProperties struct {
- // DNSDomain - READ-ONLY; The DNS domain that this host belongs to. Should contain the compete DNS suffix for the domain
- DNSDomain *string `json:"dnsDomain,omitempty"`
- // NtDomain - READ-ONLY; The NT domain that this host belongs to.
- NtDomain *string `json:"ntDomain,omitempty"`
- // HostName - READ-ONLY; The hostname without the domain suffix.
- HostName *string `json:"hostName,omitempty"`
- // NetBiosName - READ-ONLY; The host name (pre-windows2000).
- NetBiosName *string `json:"netBiosName,omitempty"`
- // AzureID - READ-ONLY; The azure resource id of the VM.
- AzureID *string `json:"azureID,omitempty"`
- // OmsAgentID - READ-ONLY; The OMS agent id, if the host has OMS agent installed.
- OmsAgentID *string `json:"omsAgentID,omitempty"`
- // OsFamily - The operating system type. Possible values include: 'Linux', 'Windows', 'Android', 'IOS'
- OsFamily OSFamily `json:"osFamily,omitempty"`
- // OsVersion - READ-ONLY; A free text representation of the operating system. This field is meant to hold specific versions the are more fine grained than OSFamily or future values not supported by OSFamily enumeration
- OsVersion *string `json:"osVersion,omitempty"`
- // IsDomainJoined - READ-ONLY; Determines whether this host belongs to a domain.
- IsDomainJoined *bool `json:"isDomainJoined,omitempty"`
+// FileHashEntityProperties fileHash entity property bag.
+type FileHashEntityProperties struct {
+ // HashValue - READ-ONLY; The file hash value.
+ HashValue *string `json:"hashValue,omitempty"`
+ // Algorithm - READ-ONLY; The hash algorithm type. Possible values include: 'Unknown', 'MD5', 'SHA1', 'SHA256', 'SHA256AC'
+ Algorithm FileHashAlgorithm `json:"algorithm,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name which is a short humanly readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
}
-// MCASDataConnector represents MCAS (Microsoft Cloud App Security) data connector.
-type MCASDataConnector struct {
- // MCASDataConnectorProperties - MCAS (Microsoft Cloud App Security) data connector properties.
- *MCASDataConnectorProperties `json:"properties,omitempty"`
+// MarshalJSON is the custom marshaler for FileHashEntityProperties.
+func (fhep FileHashEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
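FileHashEntityProperties pairs the hash value with its FileHashAlgorithm. Below is a hypothetical filter over file-hash entities, assuming the algorithm values quoted in the Algorithm comment above (such as SHA256) are the package-level constants of that type:

```go
// sha256Hashes is a hypothetical helper returning only SHA256 hash values; it
// is not part of the generated SDK.
func sha256Hashes(entities []FileHashEntity) []string {
	var hashes []string
	for _, fhe := range entities {
		// Skip entities whose property bag or hash value was not populated.
		if fhe.FileHashEntityProperties == nil || fhe.HashValue == nil {
			continue
		}
		if fhe.Algorithm == SHA256 {
			hashes = append(hashes, *fhe.HashValue)
		}
	}
	return hashes
}
```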
+// FilterAlertRuleTemplate represents filter alert rule template.
+type FilterAlertRuleTemplate struct {
+ // FilterAlertRuleTemplateProperties - Filter alert rule template properties
+ *FilterAlertRuleTemplateProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
// Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Etag - Etag of the data connector.
+ // Etag - Etag of the alert rule.
Etag *string `json:"etag,omitempty"`
- // Kind - Possible values include: 'KindDataConnector', 'KindOffice365', 'KindThreatIntelligence', 'KindAmazonWebServicesCloudTrail', 'KindAzureActiveDirectory', 'KindAzureSecurityCenter', 'KindMicrosoftCloudAppSecurity', 'KindAzureAdvancedThreatProtection', 'KindMicrosoftDefenderAdvancedThreatProtection'
- Kind KindBasicDataConnector `json:"kind,omitempty"`
+ // Kind - Possible values include: 'KindBasicAlertRuleTemplateKindAlertRuleTemplate', 'KindBasicAlertRuleTemplateKindScheduled', 'KindBasicAlertRuleTemplateKindFilter', 'KindBasicAlertRuleTemplateKindFusion'
+ Kind KindBasicAlertRuleTemplate `json:"kind,omitempty"`
}
-// MarshalJSON is the custom marshaler for MCASDataConnector.
-func (mdc MCASDataConnector) MarshalJSON() ([]byte, error) {
- mdc.Kind = KindMicrosoftCloudAppSecurity
+// MarshalJSON is the custom marshaler for FilterAlertRuleTemplate.
+func (fart FilterAlertRuleTemplate) MarshalJSON() ([]byte, error) {
+ fart.Kind = KindBasicAlertRuleTemplateKindFilter
objectMap := make(map[string]interface{})
- if mdc.MCASDataConnectorProperties != nil {
- objectMap["properties"] = mdc.MCASDataConnectorProperties
+ if fart.FilterAlertRuleTemplateProperties != nil {
+ objectMap["properties"] = fart.FilterAlertRuleTemplateProperties
}
- if mdc.Etag != nil {
- objectMap["etag"] = mdc.Etag
+ if fart.Etag != nil {
+ objectMap["etag"] = fart.Etag
}
- if mdc.Kind != "" {
- objectMap["kind"] = mdc.Kind
+ if fart.Kind != "" {
+ objectMap["kind"] = fart.Kind
}
return json.Marshal(objectMap)
}
-// AsOfficeDataConnector is the BasicDataConnector implementation for MCASDataConnector.
-func (mdc MCASDataConnector) AsOfficeDataConnector() (*OfficeDataConnector, bool) {
- return nil, false
-}
-
-// AsTIDataConnector is the BasicDataConnector implementation for MCASDataConnector.
-func (mdc MCASDataConnector) AsTIDataConnector() (*TIDataConnector, bool) {
- return nil, false
-}
-
-// AsAwsCloudTrailDataConnector is the BasicDataConnector implementation for MCASDataConnector.
-func (mdc MCASDataConnector) AsAwsCloudTrailDataConnector() (*AwsCloudTrailDataConnector, bool) {
- return nil, false
-}
-
-// AsAADDataConnector is the BasicDataConnector implementation for MCASDataConnector.
-func (mdc MCASDataConnector) AsAADDataConnector() (*AADDataConnector, bool) {
- return nil, false
-}
-
-// AsASCDataConnector is the BasicDataConnector implementation for MCASDataConnector.
-func (mdc MCASDataConnector) AsASCDataConnector() (*ASCDataConnector, bool) {
+// AsScheduledAlertRuleTemplate is the BasicAlertRuleTemplate implementation for FilterAlertRuleTemplate.
+func (fart FilterAlertRuleTemplate) AsScheduledAlertRuleTemplate() (*ScheduledAlertRuleTemplate, bool) {
return nil, false
}
-// AsMCASDataConnector is the BasicDataConnector implementation for MCASDataConnector.
-func (mdc MCASDataConnector) AsMCASDataConnector() (*MCASDataConnector, bool) {
- return &mdc, true
-}
-
-// AsAATPDataConnector is the BasicDataConnector implementation for MCASDataConnector.
-func (mdc MCASDataConnector) AsAATPDataConnector() (*AATPDataConnector, bool) {
- return nil, false
+// AsFilterAlertRuleTemplate is the BasicAlertRuleTemplate implementation for FilterAlertRuleTemplate.
+func (fart FilterAlertRuleTemplate) AsFilterAlertRuleTemplate() (*FilterAlertRuleTemplate, bool) {
+ return &fart, true
}
-// AsMDATPDataConnector is the BasicDataConnector implementation for MCASDataConnector.
-func (mdc MCASDataConnector) AsMDATPDataConnector() (*MDATPDataConnector, bool) {
+// AsFusionAlertRuleTemplate is the BasicAlertRuleTemplate implementation for FilterAlertRuleTemplate.
+func (fart FilterAlertRuleTemplate) AsFusionAlertRuleTemplate() (*FusionAlertRuleTemplate, bool) {
return nil, false
}
-// AsDataConnector is the BasicDataConnector implementation for MCASDataConnector.
-func (mdc MCASDataConnector) AsDataConnector() (*DataConnector, bool) {
+// AsAlertRuleTemplate is the BasicAlertRuleTemplate implementation for FilterAlertRuleTemplate.
+func (fart FilterAlertRuleTemplate) AsAlertRuleTemplate() (*AlertRuleTemplate, bool) {
return nil, false
}
-// AsBasicDataConnector is the BasicDataConnector implementation for MCASDataConnector.
-func (mdc MCASDataConnector) AsBasicDataConnector() (BasicDataConnector, bool) {
- return &mdc, true
+// AsBasicAlertRuleTemplate is the BasicAlertRuleTemplate implementation for FilterAlertRuleTemplate.
+func (fart FilterAlertRuleTemplate) AsBasicAlertRuleTemplate() (BasicAlertRuleTemplate, bool) {
+ return &fart, true
}
-// UnmarshalJSON is the custom unmarshaler for MCASDataConnector struct.
-func (mdc *MCASDataConnector) UnmarshalJSON(body []byte) error {
+// UnmarshalJSON is the custom unmarshaler for FilterAlertRuleTemplate struct.
+func (fart *FilterAlertRuleTemplate) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
@@ -3749,12 +5700,12 @@ func (mdc *MCASDataConnector) UnmarshalJSON(body []byte) error {
switch k {
case "properties":
if v != nil {
- var mCASDataConnectorProperties MCASDataConnectorProperties
- err = json.Unmarshal(*v, &mCASDataConnectorProperties)
+ var filterAlertRuleTemplateProperties FilterAlertRuleTemplateProperties
+ err = json.Unmarshal(*v, &filterAlertRuleTemplateProperties)
if err != nil {
return err
}
- mdc.MCASDataConnectorProperties = &mCASDataConnectorProperties
+ fart.FilterAlertRuleTemplateProperties = &filterAlertRuleTemplateProperties
}
case "id":
if v != nil {
@@ -3763,7 +5714,7 @@ func (mdc *MCASDataConnector) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- mdc.ID = &ID
+ fart.ID = &ID
}
case "type":
if v != nil {
@@ -3772,7 +5723,7 @@ func (mdc *MCASDataConnector) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- mdc.Type = &typeVar
+ fart.Type = &typeVar
}
case "name":
if v != nil {
@@ -3781,7 +5732,7 @@ func (mdc *MCASDataConnector) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- mdc.Name = &name
+ fart.Name = &name
}
case "etag":
if v != nil {
@@ -3790,16 +5741,16 @@ func (mdc *MCASDataConnector) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- mdc.Etag = &etag
+ fart.Etag = &etag
}
case "kind":
if v != nil {
- var kind KindBasicDataConnector
+ var kind KindBasicAlertRuleTemplate
err = json.Unmarshal(*v, &kind)
if err != nil {
return err
}
- mdc.Kind = kind
+ fart.Kind = kind
}
}
}
@@ -3807,113 +5758,99 @@ func (mdc *MCASDataConnector) UnmarshalJSON(body []byte) error {
return nil
}
-// MCASDataConnectorDataTypes the available data types for MCAS (Microsoft Cloud App Security) data
-// connector.
-type MCASDataConnectorDataTypes struct {
- // DiscoveryLogs - Discovery log data type connection.
- DiscoveryLogs *MCASDataConnectorDataTypesDiscoveryLogs `json:"discoveryLogs,omitempty"`
- // Alerts - Alerts data type connection.
- Alerts *AlertsDataTypeOfDataConnectorAlerts `json:"alerts,omitempty"`
-}
-
-// MCASDataConnectorDataTypesDiscoveryLogs discovery log data type connection.
-type MCASDataConnectorDataTypesDiscoveryLogs struct {
- // State - Describe whether this data type connection is enabled or not. Possible values include: 'Enabled', 'Disabled'
- State DataTypeState `json:"state,omitempty"`
-}
-
-// MCASDataConnectorProperties MCAS (Microsoft Cloud App Security) data connector properties.
-type MCASDataConnectorProperties struct {
- // DataTypes - The available data types for the connector.
- DataTypes *MCASDataConnectorDataTypes `json:"dataTypes,omitempty"`
- // TenantID - The tenant id to connect to, and get the data from.
- TenantID *string `json:"tenantId,omitempty"`
-}
-
-// MDATPDataConnector represents MDATP (Microsoft Defender Advanced Threat Protection) data connector.
-type MDATPDataConnector struct {
- // MDATPDataConnectorProperties - MDATP (Microsoft Defender Advanced Threat Protection) data connector properties.
- *MDATPDataConnectorProperties `json:"properties,omitempty"`
+// FilterAlertRuleTemplateProperties filter alert rule template properties
+type FilterAlertRuleTemplateProperties struct {
+ // DisplayName - The display name for alert rule template.
+ DisplayName *string `json:"displayName,omitempty"`
+ // Description - The description of the alert rule template.
+ Description *string `json:"description,omitempty"`
+ // Tactics - The tactics of the alert rule template
+ Tactics *[]AttackTactic `json:"tactics,omitempty"`
+ // CreatedDateUTC - READ-ONLY; The time that this alert rule template has been added.
+ CreatedDateUTC *string `json:"createdDateUTC,omitempty"`
+ // Status - The alert rule template status. Possible values include: 'Installed', 'Available', 'NotAvailable'
+ Status TemplateStatus `json:"status,omitempty"`
+ // RequiredDataConnectors - The required data connectors for this template
+ RequiredDataConnectors *[]DataConnectorStatus `json:"requiredDataConnectors,omitempty"`
+ // AlertRulesCreatedByTemplateCount - the number of alert rules that were created by this template
+ AlertRulesCreatedByTemplateCount *int32 `json:"alertRulesCreatedByTemplateCount,omitempty"`
+ // FilterProduct - The filter product name for this template rule.
+ FilterProduct *string `json:"filterProduct,omitempty"`
+ // FilterSeverities - The alert severities for which cases will be generated
+ FilterSeverities *[]AlertSeverity `json:"filterSeverities,omitempty"`
+ // FilterTitles - The alert titles for which cases will be generated
+ FilterTitles *[]string `json:"filterTitles,omitempty"`
+}
+
+// FilterAlertRuleTemplatePropertiesModel filter alert rule template property bag.
+type FilterAlertRuleTemplatePropertiesModel struct {
+ // FilterProduct - The filter product name for this template rule.
+ FilterProduct *string `json:"filterProduct,omitempty"`
+ // FilterSeverities - The alert severities for which cases will be generated
+ FilterSeverities *[]AlertSeverity `json:"filterSeverities,omitempty"`
+ // FilterTitles - The alert titles for which cases will be generated
+ FilterTitles *[]string `json:"filterTitles,omitempty"`
+}
+
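The filter template's property bag describes which alerts should raise cases (by product, severity, and title). A hypothetical evaluation helper over those fields, not part of the generated SDK, under the same in-package assumption:

```go
// filterMatches is a hypothetical helper illustrating how the filter template
// property bag above could be evaluated against an incoming alert. A nil
// filter field is treated as "match all".
func filterMatches(props FilterAlertRuleTemplatePropertiesModel, product string, severity AlertSeverity, title string) bool {
	if props.FilterProduct != nil && *props.FilterProduct != product {
		return false
	}
	severityOK := props.FilterSeverities == nil
	if !severityOK {
		for _, s := range *props.FilterSeverities {
			if s == severity {
				severityOK = true
				break
			}
		}
	}
	titleOK := props.FilterTitles == nil
	if !titleOK {
		for _, t := range *props.FilterTitles {
			if t == title {
				titleOK = true
				break
			}
		}
	}
	return severityOK && titleOK
}
```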
+// FusionAlertRuleTemplate represents fusion alert rule template.
+type FusionAlertRuleTemplate struct {
+ // FusionAlertRuleTemplateProperties - Fusion alert rule template properties
+ *FusionAlertRuleTemplateProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
// Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Etag - Etag of the data connector.
+ // Etag - Etag of the alert rule.
Etag *string `json:"etag,omitempty"`
- // Kind - Possible values include: 'KindDataConnector', 'KindOffice365', 'KindThreatIntelligence', 'KindAmazonWebServicesCloudTrail', 'KindAzureActiveDirectory', 'KindAzureSecurityCenter', 'KindMicrosoftCloudAppSecurity', 'KindAzureAdvancedThreatProtection', 'KindMicrosoftDefenderAdvancedThreatProtection'
- Kind KindBasicDataConnector `json:"kind,omitempty"`
+ // Kind - Possible values include: 'KindBasicAlertRuleTemplateKindAlertRuleTemplate', 'KindBasicAlertRuleTemplateKindScheduled', 'KindBasicAlertRuleTemplateKindFilter', 'KindBasicAlertRuleTemplateKindFusion'
+ Kind KindBasicAlertRuleTemplate `json:"kind,omitempty"`
}
-// MarshalJSON is the custom marshaler for MDATPDataConnector.
-func (mdc MDATPDataConnector) MarshalJSON() ([]byte, error) {
- mdc.Kind = KindMicrosoftDefenderAdvancedThreatProtection
+// MarshalJSON is the custom marshaler for FusionAlertRuleTemplate.
+func (fart FusionAlertRuleTemplate) MarshalJSON() ([]byte, error) {
+ fart.Kind = KindBasicAlertRuleTemplateKindFusion
objectMap := make(map[string]interface{})
- if mdc.MDATPDataConnectorProperties != nil {
- objectMap["properties"] = mdc.MDATPDataConnectorProperties
+ if fart.FusionAlertRuleTemplateProperties != nil {
+ objectMap["properties"] = fart.FusionAlertRuleTemplateProperties
}
- if mdc.Etag != nil {
- objectMap["etag"] = mdc.Etag
+ if fart.Etag != nil {
+ objectMap["etag"] = fart.Etag
}
- if mdc.Kind != "" {
- objectMap["kind"] = mdc.Kind
+ if fart.Kind != "" {
+ objectMap["kind"] = fart.Kind
}
return json.Marshal(objectMap)
}
-// AsOfficeDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
-func (mdc MDATPDataConnector) AsOfficeDataConnector() (*OfficeDataConnector, bool) {
+// AsScheduledAlertRuleTemplate is the BasicAlertRuleTemplate implementation for FusionAlertRuleTemplate.
+func (fart FusionAlertRuleTemplate) AsScheduledAlertRuleTemplate() (*ScheduledAlertRuleTemplate, bool) {
return nil, false
}
-// AsTIDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
-func (mdc MDATPDataConnector) AsTIDataConnector() (*TIDataConnector, bool) {
+// AsFilterAlertRuleTemplate is the BasicAlertRuleTemplate implementation for FusionAlertRuleTemplate.
+func (fart FusionAlertRuleTemplate) AsFilterAlertRuleTemplate() (*FilterAlertRuleTemplate, bool) {
return nil, false
}
-// AsAwsCloudTrailDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
-func (mdc MDATPDataConnector) AsAwsCloudTrailDataConnector() (*AwsCloudTrailDataConnector, bool) {
- return nil, false
+// AsFusionAlertRuleTemplate is the BasicAlertRuleTemplate implementation for FusionAlertRuleTemplate.
+func (fart FusionAlertRuleTemplate) AsFusionAlertRuleTemplate() (*FusionAlertRuleTemplate, bool) {
+ return &fart, true
}
-// AsAADDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
-func (mdc MDATPDataConnector) AsAADDataConnector() (*AADDataConnector, bool) {
+// AsAlertRuleTemplate is the BasicAlertRuleTemplate implementation for FusionAlertRuleTemplate.
+func (fart FusionAlertRuleTemplate) AsAlertRuleTemplate() (*AlertRuleTemplate, bool) {
return nil, false
}
-// AsASCDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
-func (mdc MDATPDataConnector) AsASCDataConnector() (*ASCDataConnector, bool) {
- return nil, false
-}
-
-// AsMCASDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
-func (mdc MDATPDataConnector) AsMCASDataConnector() (*MCASDataConnector, bool) {
- return nil, false
-}
-
-// AsAATPDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
-func (mdc MDATPDataConnector) AsAATPDataConnector() (*AATPDataConnector, bool) {
- return nil, false
-}
-
-// AsMDATPDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
-func (mdc MDATPDataConnector) AsMDATPDataConnector() (*MDATPDataConnector, bool) {
- return &mdc, true
-}
-
-// AsDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
-func (mdc MDATPDataConnector) AsDataConnector() (*DataConnector, bool) {
- return nil, false
-}
-
-// AsBasicDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
-func (mdc MDATPDataConnector) AsBasicDataConnector() (BasicDataConnector, bool) {
- return &mdc, true
+// AsBasicAlertRuleTemplate is the BasicAlertRuleTemplate implementation for FusionAlertRuleTemplate.
+func (fart FusionAlertRuleTemplate) AsBasicAlertRuleTemplate() (BasicAlertRuleTemplate, bool) {
+ return &fart, true
}
-// UnmarshalJSON is the custom unmarshaler for MDATPDataConnector struct.
-func (mdc *MDATPDataConnector) UnmarshalJSON(body []byte) error {
+// UnmarshalJSON is the custom unmarshaler for FusionAlertRuleTemplate struct.
+func (fart *FusionAlertRuleTemplate) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
@@ -3923,12 +5860,12 @@ func (mdc *MDATPDataConnector) UnmarshalJSON(body []byte) error {
switch k {
case "properties":
if v != nil {
- var mDATPDataConnectorProperties MDATPDataConnectorProperties
- err = json.Unmarshal(*v, &mDATPDataConnectorProperties)
+ var fusionAlertRuleTemplateProperties FusionAlertRuleTemplateProperties
+ err = json.Unmarshal(*v, &fusionAlertRuleTemplateProperties)
if err != nil {
return err
}
- mdc.MDATPDataConnectorProperties = &mDATPDataConnectorProperties
+ fart.FusionAlertRuleTemplateProperties = &fusionAlertRuleTemplateProperties
}
case "id":
if v != nil {
@@ -3937,7 +5874,7 @@ func (mdc *MDATPDataConnector) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- mdc.ID = &ID
+ fart.ID = &ID
}
case "type":
if v != nil {
@@ -3946,7 +5883,7 @@ func (mdc *MDATPDataConnector) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- mdc.Type = &typeVar
+ fart.Type = &typeVar
}
case "name":
if v != nil {
@@ -3955,7 +5892,7 @@ func (mdc *MDATPDataConnector) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- mdc.Name = &name
+ fart.Name = &name
}
case "etag":
if v != nil {
@@ -3964,16 +5901,16 @@ func (mdc *MDATPDataConnector) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- mdc.Etag = &etag
+ fart.Etag = &etag
}
case "kind":
if v != nil {
- var kind KindBasicDataConnector
+ var kind KindBasicAlertRuleTemplate
err = json.Unmarshal(*v, &kind)
if err != nil {
return err
}
- mdc.Kind = kind
+ fart.Kind = kind
}
}
}
@@ -3981,39 +5918,164 @@ func (mdc *MDATPDataConnector) UnmarshalJSON(body []byte) error {
return nil
}
-// MDATPDataConnectorProperties MDATP (Microsoft Defender Advanced Threat Protection) data connector
-// properties.
-type MDATPDataConnectorProperties struct {
- // TenantID - The tenant id to connect to, and get the data from.
- TenantID *string `json:"tenantId,omitempty"`
- // DataTypes - The available data types for the connector.
- DataTypes *AlertsDataTypeOfDataConnector `json:"dataTypes,omitempty"`
+// FusionAlertRuleTemplateProperties fusion alert rule template properties
+type FusionAlertRuleTemplateProperties struct {
+ // DisplayName - The display name for alert rule template.
+ DisplayName *string `json:"displayName,omitempty"`
+ // Description - The description of the alert rule template.
+ Description *string `json:"description,omitempty"`
+ // Tactics - The tactics of the alert rule template
+ Tactics *[]AttackTactic `json:"tactics,omitempty"`
+ // CreatedDateUTC - READ-ONLY; The time that this alert rule template has been added.
+ CreatedDateUTC *string `json:"createdDateUTC,omitempty"`
+ // Status - The alert rule template status. Possible values include: 'Installed', 'Available', 'NotAvailable'
+ Status TemplateStatus `json:"status,omitempty"`
+ // RequiredDataConnectors - The required data connectors for this template
+ RequiredDataConnectors *[]DataConnectorStatus `json:"requiredDataConnectors,omitempty"`
+ // AlertRulesCreatedByTemplateCount - the number of alert rules that were created by this template
+ AlertRulesCreatedByTemplateCount *int32 `json:"alertRulesCreatedByTemplateCount,omitempty"`
+ // Severity - The severity for alerts created by this alert rule. Possible values include: 'High', 'Medium', 'Low', 'Informational'
+ Severity AlertSeverity `json:"severity,omitempty"`
}
-// OfficeConsent consent for Office365 tenant that already made.
-type OfficeConsent struct {
- autorest.Response `json:"-"`
- // OfficeConsentProperties - Office consent properties
- *OfficeConsentProperties `json:"properties,omitempty"`
+// FusionAlertRuleTemplatePropertiesModel fusion alert rule template property bag.
+type FusionAlertRuleTemplatePropertiesModel struct {
+ // Severity - The severity for alerts created by this alert rule. Possible values include: 'High', 'Medium', 'Low', 'Informational'
+ Severity AlertSeverity `json:"severity,omitempty"`
+}
+
+// GeoLocation the geo-location context attached to the ip entity
+type GeoLocation struct {
+ // CountryCode - READ-ONLY; The country code according to ISO 3166 format
+ CountryCode *string `json:"countryCode,omitempty"`
+ // CountryName - READ-ONLY; Country name according to ISO 3166 Alpha 2: the lowercase of the English Short Name
+ CountryName *string `json:"countryName,omitempty"`
+ // State - READ-ONLY; State name
+ State *string `json:"state,omitempty"`
+ // City - READ-ONLY; City name
+ City *string `json:"city,omitempty"`
+ // Longitude - READ-ONLY; The longitude of the identified location, expressed as a floating point number with range of -180 to 180, with positive numbers representing East and negative numbers representing West. Latitude and longitude are derived from the city or postal code.
+ Longitude *float64 `json:"longitude,omitempty"`
+ // Latitude - READ-ONLY; The latitude of the identified location, expressed as a floating point number with range of -90 to 90, with positive numbers representing North and negative numbers representing South. Latitude and longitude are derived from the city or postal code.
+ Latitude *float64 `json:"latitude,omitempty"`
+ // Asn - READ-ONLY; Autonomous System Number
+ Asn *int32 `json:"asn,omitempty"`
+}
+
+// HostEntity represents a host entity.
+type HostEntity struct {
+ // HostEntityProperties - Host entity properties
+ *HostEntityProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
// Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
}
-// MarshalJSON is the custom marshaler for OfficeConsent.
-func (oc OfficeConsent) MarshalJSON() ([]byte, error) {
+// MarshalJSON is the custom marshaler for HostEntity.
+func (he HostEntity) MarshalJSON() ([]byte, error) {
+ he.Kind = KindHost
objectMap := make(map[string]interface{})
- if oc.OfficeConsentProperties != nil {
- objectMap["properties"] = oc.OfficeConsentProperties
+ if he.HostEntityProperties != nil {
+ objectMap["properties"] = he.HostEntityProperties
+ }
+ if he.Kind != "" {
+ objectMap["kind"] = he.Kind
}
return json.Marshal(objectMap)
}
-// UnmarshalJSON is the custom unmarshaler for OfficeConsent struct.
-func (oc *OfficeConsent) UnmarshalJSON(body []byte) error {
+// AsAccountEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
+}
+
+// AsHostEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsHostEntity() (*HostEntity, bool) {
+ return &he, true
+}
+
+// AsFileEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsFileEntity() (*FileEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityAlert is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
+}
+
+// AsFileHashEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
+}
+
+// AsMalwareEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for HostEntity.
+func (he HostEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &he, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for HostEntity struct.
+func (he *HostEntity) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
@@ -4023,12 +6085,12 @@ func (oc *OfficeConsent) UnmarshalJSON(body []byte) error {
switch k {
case "properties":
if v != nil {
- var officeConsentProperties OfficeConsentProperties
- err = json.Unmarshal(*v, &officeConsentProperties)
+ var hostEntityProperties HostEntityProperties
+ err = json.Unmarshal(*v, &hostEntityProperties)
if err != nil {
return err
}
- oc.OfficeConsentProperties = &officeConsentProperties
+ he.HostEntityProperties = &hostEntityProperties
}
case "id":
if v != nil {
@@ -4037,7 +6099,7 @@ func (oc *OfficeConsent) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- oc.ID = &ID
+ he.ID = &ID
}
case "type":
if v != nil {
@@ -4046,7 +6108,7 @@ func (oc *OfficeConsent) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- oc.Type = &typeVar
+ he.Type = &typeVar
}
case "name":
if v != nil {
@@ -4055,7 +6117,16 @@ func (oc *OfficeConsent) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- oc.Name = &name
+ he.Name = &name
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicEntity
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ he.Kind = kind
}
}
}
@@ -4063,244 +6134,350 @@ func (oc *OfficeConsent) UnmarshalJSON(body []byte) error {
return nil
}
-// OfficeConsentList list of all the office365 consents.
-type OfficeConsentList struct {
- autorest.Response `json:"-"`
- // NextLink - READ-ONLY; URL to fetch the next set of office consents.
- NextLink *string `json:"nextLink,omitempty"`
- // Value - Array of the consents.
- Value *[]OfficeConsent `json:"value,omitempty"`
+// HostEntityProperties host entity property bag.
+type HostEntityProperties struct {
+ // DNSDomain - READ-ONLY; The DNS domain that this host belongs to. Should contain the complete DNS suffix for the domain
+ DNSDomain *string `json:"dnsDomain,omitempty"`
+ // NtDomain - READ-ONLY; The NT domain that this host belongs to.
+ NtDomain *string `json:"ntDomain,omitempty"`
+ // HostName - READ-ONLY; The hostname without the domain suffix.
+ HostName *string `json:"hostName,omitempty"`
+ // NetBiosName - READ-ONLY; The host name (pre-Windows 2000).
+ NetBiosName *string `json:"netBiosName,omitempty"`
+ // AzureID - READ-ONLY; The azure resource id of the VM.
+ AzureID *string `json:"azureID,omitempty"`
+ // OmsAgentID - READ-ONLY; The OMS agent id, if the host has OMS agent installed.
+ OmsAgentID *string `json:"omsAgentID,omitempty"`
+ // OsFamily - The operating system type. Possible values include: 'Linux', 'Windows', 'Android', 'IOS'
+ OsFamily OSFamily `json:"osFamily,omitempty"`
+ // OsVersion - READ-ONLY; A free-text representation of the operating system. This field is meant to hold specific versions that are more fine-grained than OSFamily, or future values not supported by the OSFamily enumeration
+ OsVersion *string `json:"osVersion,omitempty"`
+ // IsDomainJoined - READ-ONLY; Determines whether this host belongs to a domain.
+ IsDomainJoined *bool `json:"isDomainJoined,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short human-readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
}
-// OfficeConsentListIterator provides access to a complete listing of OfficeConsent values.
-type OfficeConsentListIterator struct {
- i int
- page OfficeConsentListPage
+// MarshalJSON is the custom marshaler for HostEntityProperties.
+func (hep HostEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if hep.OsFamily != "" {
+ objectMap["osFamily"] = hep.OsFamily
+ }
+ return json.Marshal(objectMap)
}
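
As a quick illustration of the discriminator handling above, the following sketch marshals a HostEntity and lets the custom MarshalJSON stamp the Host kind while dropping the READ-ONLY properties. The import path and the constant names (KindHost, Linux) are assumptions based on the usual layout of this generated package, not something this diff shows.

package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path for the generated package touched by this diff.
	securityinsight "github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
)

func main() {
	he := securityinsight.HostEntity{
		HostEntityProperties: &securityinsight.HostEntityProperties{
			// OsFamily is the only writable property; READ-ONLY fields such as
			// HostName or DNSDomain are skipped by the custom marshaler.
			OsFamily: securityinsight.Linux,
		},
	}
	b, err := json.Marshal(he) // MarshalJSON forces Kind to KindHost before emitting.
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. {"kind":"Host","properties":{"osFamily":"Linux"}}
}
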
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *OfficeConsentListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/OfficeConsentListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
+// IPEntity represents an ip entity.
+type IPEntity struct {
+ // IPEntityProperties - Ip entity properties
+ *IPEntityProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for IPEntity.
+func (ie IPEntity) MarshalJSON() ([]byte, error) {
+ ie.Kind = KindIP
+ objectMap := make(map[string]interface{})
+ if ie.IPEntityProperties != nil {
+ objectMap["properties"] = ie.IPEntityProperties
}
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
+ if ie.Kind != "" {
+ objectMap["kind"] = ie.Kind
}
- iter.i = 0
- return nil
+ return json.Marshal(objectMap)
}
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *OfficeConsentListIterator) Next() error {
- return iter.NextWithContext(context.Background())
+// AsAccountEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
}
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter OfficeConsentListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
+// AsHostEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsHostEntity() (*HostEntity, bool) {
+ return nil, false
}
-// Response returns the raw server response from the last page request.
-func (iter OfficeConsentListIterator) Response() OfficeConsentList {
- return iter.page.Response()
+// AsFileEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsFileEntity() (*FileEntity, bool) {
+ return nil, false
}
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter OfficeConsentListIterator) Value() OfficeConsent {
- if !iter.page.NotDone() {
- return OfficeConsent{}
- }
- return iter.page.Values()[iter.i]
+// AsSecurityAlert is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
}
-// Creates a new instance of the OfficeConsentListIterator type.
-func NewOfficeConsentListIterator(page OfficeConsentListPage) OfficeConsentListIterator {
- return OfficeConsentListIterator{page: page}
+// AsFileHashEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
}
-// IsEmpty returns true if the ListResult contains no values.
-func (ocl OfficeConsentList) IsEmpty() bool {
- return ocl.Value == nil || len(*ocl.Value) == 0
+// AsMalwareEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
}
-// officeConsentListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (ocl OfficeConsentList) officeConsentListPreparer(ctx context.Context) (*http.Request, error) {
- if ocl.NextLink == nil || len(to.String(ocl.NextLink)) < 1 {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(ocl.NextLink)))
+// AsSecurityGroupEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
}
-// OfficeConsentListPage contains a page of OfficeConsent values.
-type OfficeConsentListPage struct {
- fn func(context.Context, OfficeConsentList) (OfficeConsentList, error)
- ocl OfficeConsentList
+// AsAzureResourceEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
}
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *OfficeConsentListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/OfficeConsentListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- next, err := page.fn(ctx, page.ocl)
- if err != nil {
- return err
- }
- page.ocl = next
- return nil
+// AsCloudApplicationEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
}
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *OfficeConsentListPage) Next() error {
- return page.NextWithContext(context.Background())
+// AsProcessEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
}
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page OfficeConsentListPage) NotDone() bool {
- return !page.ocl.IsEmpty()
+// AsDNSEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
}
-// Response returns the raw server response from the last page request.
-func (page OfficeConsentListPage) Response() OfficeConsentList {
- return page.ocl
+// AsIPEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsIPEntity() (*IPEntity, bool) {
+ return &ie, true
}
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page OfficeConsentListPage) Values() []OfficeConsent {
- if page.ocl.IsEmpty() {
- return nil
+// AsRegistryKeyEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for IPEntity.
+func (ie IPEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &ie, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for IPEntity struct.
+func (ie *IPEntity) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
}
- return *page.ocl.Value
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var IPEntityProperties IPEntityProperties
+ err = json.Unmarshal(*v, &IPEntityProperties)
+ if err != nil {
+ return err
+ }
+ ie.IPEntityProperties = &IPEntityProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ ie.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ie.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ie.Name = &name
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicEntity
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ ie.Kind = kind
+ }
+ }
+ }
+
+ return nil
}
-// Creates a new instance of the OfficeConsentListPage type.
-func NewOfficeConsentListPage(getNextPage func(context.Context, OfficeConsentList) (OfficeConsentList, error)) OfficeConsentListPage {
- return OfficeConsentListPage{fn: getNextPage}
+// IPEntityProperties ip entity property bag.
+type IPEntityProperties struct {
+ // Address - READ-ONLY; The IP address as a string, e.g. 127.0.0.1 (either IPv4 or IPv6)
+ Address *string `json:"address,omitempty"`
+ // Location - The geo-location context attached to the ip entity
+ Location *GeoLocation `json:"location,omitempty"`
+ // ThreatIntelligence - READ-ONLY; A list of TI contexts attached to the ip entity.
+ ThreatIntelligence *[]ThreatIntelligence `json:"threatIntelligence,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short human-readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
}
-// OfficeConsentProperties consent property bag.
-type OfficeConsentProperties struct {
- // TenantID - The tenantId of the Office365 with the consent.
- TenantID *string `json:"tenantId,omitempty"`
- // TenantName - READ-ONLY; The tenant name of the Office365 with the consent.
- TenantName *string `json:"tenantName,omitempty"`
+// MarshalJSON is the custom marshaler for IPEntityProperties.
+func (iep IPEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if iep.Location != nil {
+ objectMap["location"] = iep.Location
+ }
+ return json.Marshal(objectMap)
}
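
Going the other direction, the custom unmarshaler above repopulates READ-ONLY members such as Address and the nested GeoLocation. A minimal sketch, assuming the same generated-package import as before and an "Ip" kind string on the wire (both assumptions, not confirmed by this diff):

package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path for the generated package touched by this diff.
	securityinsight "github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
)

func main() {
	payload := []byte(`{"kind":"Ip","properties":{"address":"203.0.113.7","location":{"countryCode":"US","city":"Redmond"}}}`)

	var ip securityinsight.IPEntity
	if err := json.Unmarshal(payload, &ip); err != nil {
		panic(err)
	}
	// READ-ONLY fields are filled in on the way in even though MarshalJSON
	// would never emit them.
	fmt.Println(*ip.Address, *ip.Location.City) // 203.0.113.7 Redmond
}
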
-// OfficeDataConnector represents office data connector.
-type OfficeDataConnector struct {
- // OfficeDataConnectorProperties - Office data connector properties.
- *OfficeDataConnectorProperties `json:"properties,omitempty"`
+// MalwareEntity represents a malware entity.
+type MalwareEntity struct {
+ // MalwareEntityProperties - Malware entity properties
+ *MalwareEntityProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
// Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Etag - Etag of the data connector.
- Etag *string `json:"etag,omitempty"`
- // Kind - Possible values include: 'KindDataConnector', 'KindOffice365', 'KindThreatIntelligence', 'KindAmazonWebServicesCloudTrail', 'KindAzureActiveDirectory', 'KindAzureSecurityCenter', 'KindMicrosoftCloudAppSecurity', 'KindAzureAdvancedThreatProtection', 'KindMicrosoftDefenderAdvancedThreatProtection'
- Kind KindBasicDataConnector `json:"kind,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
}
-// MarshalJSON is the custom marshaler for OfficeDataConnector.
-func (odc OfficeDataConnector) MarshalJSON() ([]byte, error) {
- odc.Kind = KindOffice365
+// MarshalJSON is the custom marshaler for MalwareEntity.
+func (me MalwareEntity) MarshalJSON() ([]byte, error) {
+ me.Kind = KindMalware
objectMap := make(map[string]interface{})
- if odc.OfficeDataConnectorProperties != nil {
- objectMap["properties"] = odc.OfficeDataConnectorProperties
- }
- if odc.Etag != nil {
- objectMap["etag"] = odc.Etag
+ if me.MalwareEntityProperties != nil {
+ objectMap["properties"] = me.MalwareEntityProperties
}
- if odc.Kind != "" {
- objectMap["kind"] = odc.Kind
+ if me.Kind != "" {
+ objectMap["kind"] = me.Kind
}
return json.Marshal(objectMap)
}
-// AsOfficeDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
-func (odc OfficeDataConnector) AsOfficeDataConnector() (*OfficeDataConnector, bool) {
- return &odc, true
+// AsAccountEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
}
-// AsTIDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
-func (odc OfficeDataConnector) AsTIDataConnector() (*TIDataConnector, bool) {
+// AsHostEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsHostEntity() (*HostEntity, bool) {
return nil, false
}
-// AsAwsCloudTrailDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
-func (odc OfficeDataConnector) AsAwsCloudTrailDataConnector() (*AwsCloudTrailDataConnector, bool) {
+// AsFileEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsFileEntity() (*FileEntity, bool) {
return nil, false
}
-// AsAADDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
-func (odc OfficeDataConnector) AsAADDataConnector() (*AADDataConnector, bool) {
+// AsSecurityAlert is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsSecurityAlert() (*SecurityAlert, bool) {
return nil, false
}
-// AsASCDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
-func (odc OfficeDataConnector) AsASCDataConnector() (*ASCDataConnector, bool) {
+// AsFileHashEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsFileHashEntity() (*FileHashEntity, bool) {
return nil, false
}
-// AsMCASDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
-func (odc OfficeDataConnector) AsMCASDataConnector() (*MCASDataConnector, bool) {
+// AsMalwareEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return &me, true
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
return nil, false
}
-// AsAATPDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
-func (odc OfficeDataConnector) AsAATPDataConnector() (*AATPDataConnector, bool) {
+// AsAzureResourceEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
return nil, false
}
-// AsMDATPDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
-func (odc OfficeDataConnector) AsMDATPDataConnector() (*MDATPDataConnector, bool) {
+// AsCloudApplicationEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
return nil, false
}
-// AsDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
-func (odc OfficeDataConnector) AsDataConnector() (*DataConnector, bool) {
+// AsProcessEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsProcessEntity() (*ProcessEntity, bool) {
return nil, false
}
-// AsBasicDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
-func (odc OfficeDataConnector) AsBasicDataConnector() (BasicDataConnector, bool) {
- return &odc, true
+// AsDNSEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
}
-// UnmarshalJSON is the custom unmarshaler for OfficeDataConnector struct.
-func (odc *OfficeDataConnector) UnmarshalJSON(body []byte) error {
+// AsIPEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for MalwareEntity.
+func (me MalwareEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &me, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for MalwareEntity struct.
+func (me *MalwareEntity) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
@@ -4310,12 +6487,12 @@ func (odc *OfficeDataConnector) UnmarshalJSON(body []byte) error {
switch k {
case "properties":
if v != nil {
- var officeDataConnectorProperties OfficeDataConnectorProperties
- err = json.Unmarshal(*v, &officeDataConnectorProperties)
+ var malwareEntityProperties MalwareEntityProperties
+ err = json.Unmarshal(*v, &malwareEntityProperties)
if err != nil {
return err
}
- odc.OfficeDataConnectorProperties = &officeDataConnectorProperties
+ me.MalwareEntityProperties = &malwareEntityProperties
}
case "id":
if v != nil {
@@ -4324,7 +6501,7 @@ func (odc *OfficeDataConnector) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- odc.ID = &ID
+ me.ID = &ID
}
case "type":
if v != nil {
@@ -4333,7 +6510,7 @@ func (odc *OfficeDataConnector) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- odc.Type = &typeVar
+ me.Type = &typeVar
}
case "name":
if v != nil {
@@ -4342,25 +6519,16 @@ func (odc *OfficeDataConnector) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- odc.Name = &name
- }
- case "etag":
- if v != nil {
- var etag string
- err = json.Unmarshal(*v, &etag)
- if err != nil {
- return err
- }
- odc.Etag = &etag
+ me.Name = &name
}
case "kind":
if v != nil {
- var kind KindBasicDataConnector
+ var kind KindBasicEntity
err = json.Unmarshal(*v, &kind)
if err != nil {
return err
}
- odc.Kind = kind
+ me.Kind = kind
}
}
}
@@ -4368,259 +6536,2204 @@ func (odc *OfficeDataConnector) UnmarshalJSON(body []byte) error {
return nil
}
-// OfficeDataConnectorDataTypes the available data types for office data connector.
-type OfficeDataConnectorDataTypes struct {
- // SharePoint - SharePoint data type connection.
- SharePoint *OfficeDataConnectorDataTypesSharePoint `json:"sharePoint,omitempty"`
- // Exchange - Exchange data type connection.
- Exchange *OfficeDataConnectorDataTypesExchange `json:"exchange,omitempty"`
-}
-
-// OfficeDataConnectorDataTypesExchange exchange data type connection.
-type OfficeDataConnectorDataTypesExchange struct {
- // State - Describe whether this data type connection is enabled or not. Possible values include: 'Enabled', 'Disabled'
- State DataTypeState `json:"state,omitempty"`
-}
-
-// OfficeDataConnectorDataTypesSharePoint sharePoint data type connection.
-type OfficeDataConnectorDataTypesSharePoint struct {
- // State - Describe whether this data type connection is enabled or not. Possible values include: 'Enabled', 'Disabled'
- State DataTypeState `json:"state,omitempty"`
+// MalwareEntityProperties malware entity property bag.
+type MalwareEntityProperties struct {
+ // MalwareName - READ-ONLY; The malware name by the vendor, e.g. Win32/Toga!rfn
+ MalwareName *string `json:"malwareName,omitempty"`
+ // Category - READ-ONLY; The malware category by the vendor, e.g. Trojan
+ Category *string `json:"category,omitempty"`
+ // FileEntityIds - READ-ONLY; List of linked file entity identifiers on which the malware was found
+ FileEntityIds *[]string `json:"fileEntityIds,omitempty"`
+ // ProcessEntityIds - READ-ONLY; List of linked process entity identifiers on which the malware was found.
+ ProcessEntityIds *[]string `json:"processEntityIds,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short human-readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for MalwareEntityProperties.
+func (mep MalwareEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
}
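
Every entity variant in this file satisfies the BasicEntity interface through these As* methods, so callers can dispatch on the concrete type without reflection. A hypothetical helper sketching that pattern (the interface definition itself lives elsewhere in the generated file; describeEntity is illustrative only):

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"

	// Assumed import path for the generated package touched by this diff.
	securityinsight "github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
)

// describeEntity branches on the concrete entity behind the BasicEntity
// interface using the generated As* helpers.
func describeEntity(e securityinsight.BasicEntity) string {
	if host, ok := e.AsHostEntity(); ok && host.HostEntityProperties != nil && host.HostName != nil {
		return "host: " + *host.HostName
	}
	if ip, ok := e.AsIPEntity(); ok && ip.IPEntityProperties != nil && ip.Address != nil {
		return "ip: " + *ip.Address
	}
	if mal, ok := e.AsMalwareEntity(); ok && mal.MalwareEntityProperties != nil && mal.MalwareName != nil {
		return "malware: " + *mal.MalwareName
	}
	return "other entity kind"
}

func main() {
	ip := securityinsight.IPEntity{
		IPEntityProperties: &securityinsight.IPEntityProperties{Address: to.StringPtr("203.0.113.7")},
	}
	fmt.Println(describeEntity(ip)) // ip: 203.0.113.7
}
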
-// OfficeDataConnectorProperties office data connector properties.
-type OfficeDataConnectorProperties struct {
- // DataTypes - The available data types for the connector.
- DataTypes *OfficeDataConnectorDataTypes `json:"dataTypes,omitempty"`
- // TenantID - The tenant id to connect to, and get the data from.
- TenantID *string `json:"tenantId,omitempty"`
+// MCASDataConnector represents MCAS (Microsoft Cloud App Security) data connector.
+type MCASDataConnector struct {
+ // MCASDataConnectorProperties - MCAS (Microsoft Cloud App Security) data connector properties.
+ *MCASDataConnectorProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Etag - Etag of the data connector.
+ Etag *string `json:"etag,omitempty"`
+ // Kind - Possible values include: 'KindDataConnector', 'KindOffice365', 'KindThreatIntelligence', 'KindAmazonWebServicesCloudTrail', 'KindAzureActiveDirectory', 'KindAzureSecurityCenter', 'KindMicrosoftCloudAppSecurity', 'KindAzureAdvancedThreatProtection', 'KindMicrosoftDefenderAdvancedThreatProtection'
+ Kind KindBasicDataConnector `json:"kind,omitempty"`
}
-// Operation operation provided by provider
+// MarshalJSON is the custom marshaler for MCASDataConnector.
+func (mdc MCASDataConnector) MarshalJSON() ([]byte, error) {
+ mdc.Kind = KindMicrosoftCloudAppSecurity
+ objectMap := make(map[string]interface{})
+ if mdc.MCASDataConnectorProperties != nil {
+ objectMap["properties"] = mdc.MCASDataConnectorProperties
+ }
+ if mdc.Etag != nil {
+ objectMap["etag"] = mdc.Etag
+ }
+ if mdc.Kind != "" {
+ objectMap["kind"] = mdc.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsOfficeDataConnector is the BasicDataConnector implementation for MCASDataConnector.
+func (mdc MCASDataConnector) AsOfficeDataConnector() (*OfficeDataConnector, bool) {
+ return nil, false
+}
+
+// AsTIDataConnector is the BasicDataConnector implementation for MCASDataConnector.
+func (mdc MCASDataConnector) AsTIDataConnector() (*TIDataConnector, bool) {
+ return nil, false
+}
+
+// AsAwsCloudTrailDataConnector is the BasicDataConnector implementation for MCASDataConnector.
+func (mdc MCASDataConnector) AsAwsCloudTrailDataConnector() (*AwsCloudTrailDataConnector, bool) {
+ return nil, false
+}
+
+// AsAADDataConnector is the BasicDataConnector implementation for MCASDataConnector.
+func (mdc MCASDataConnector) AsAADDataConnector() (*AADDataConnector, bool) {
+ return nil, false
+}
+
+// AsASCDataConnector is the BasicDataConnector implementation for MCASDataConnector.
+func (mdc MCASDataConnector) AsASCDataConnector() (*ASCDataConnector, bool) {
+ return nil, false
+}
+
+// AsMCASDataConnector is the BasicDataConnector implementation for MCASDataConnector.
+func (mdc MCASDataConnector) AsMCASDataConnector() (*MCASDataConnector, bool) {
+ return &mdc, true
+}
+
+// AsAATPDataConnector is the BasicDataConnector implementation for MCASDataConnector.
+func (mdc MCASDataConnector) AsAATPDataConnector() (*AATPDataConnector, bool) {
+ return nil, false
+}
+
+// AsMDATPDataConnector is the BasicDataConnector implementation for MCASDataConnector.
+func (mdc MCASDataConnector) AsMDATPDataConnector() (*MDATPDataConnector, bool) {
+ return nil, false
+}
+
+// AsDataConnector is the BasicDataConnector implementation for MCASDataConnector.
+func (mdc MCASDataConnector) AsDataConnector() (*DataConnector, bool) {
+ return nil, false
+}
+
+// AsBasicDataConnector is the BasicDataConnector implementation for MCASDataConnector.
+func (mdc MCASDataConnector) AsBasicDataConnector() (BasicDataConnector, bool) {
+ return &mdc, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for MCASDataConnector struct.
+func (mdc *MCASDataConnector) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var mCASDataConnectorProperties MCASDataConnectorProperties
+ err = json.Unmarshal(*v, &mCASDataConnectorProperties)
+ if err != nil {
+ return err
+ }
+ mdc.MCASDataConnectorProperties = &mCASDataConnectorProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ mdc.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ mdc.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mdc.Name = &name
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ mdc.Etag = &etag
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicDataConnector
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ mdc.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
+
+// MCASDataConnectorDataTypes the available data types for MCAS (Microsoft Cloud App Security) data
+// connector.
+type MCASDataConnectorDataTypes struct {
+ // DiscoveryLogs - Discovery log data type connection.
+ DiscoveryLogs *MCASDataConnectorDataTypesDiscoveryLogs `json:"discoveryLogs,omitempty"`
+ // Alerts - Alerts data type connection.
+ Alerts *AlertsDataTypeOfDataConnectorAlerts `json:"alerts,omitempty"`
+}
+
+// MCASDataConnectorDataTypesDiscoveryLogs discovery log data type connection.
+type MCASDataConnectorDataTypesDiscoveryLogs struct {
+ // State - Describe whether this data type connection is enabled or not. Possible values include: 'Enabled', 'Disabled'
+ State DataTypeState `json:"state,omitempty"`
+}
+
+// MCASDataConnectorProperties MCAS (Microsoft Cloud App Security) data connector properties.
+type MCASDataConnectorProperties struct {
+ // DataTypes - The available data types for the connector.
+ DataTypes *MCASDataConnectorDataTypes `json:"dataTypes,omitempty"`
+ // TenantID - The tenant id to connect to, and get the data from.
+ TenantID *string `json:"tenantId,omitempty"`
+}
+
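
To show how the nested data-type toggles are meant to be used, here is a rough sketch that enables both MCAS data types and lets the custom marshaler stamp the MicrosoftCloudAppSecurity kind. The tenant ID is a placeholder, and the Enabled constant plus the State field on the alerts data type are assumed to follow the same DataTypeState pattern as the discovery-logs type above.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"

	// Assumed import path for the generated package touched by this diff.
	securityinsight "github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
)

func main() {
	mcas := securityinsight.MCASDataConnector{
		MCASDataConnectorProperties: &securityinsight.MCASDataConnectorProperties{
			TenantID: to.StringPtr("00000000-0000-0000-0000-000000000000"), // placeholder tenant
			DataTypes: &securityinsight.MCASDataConnectorDataTypes{
				// State on the alerts type is assumed to mirror the other toggles.
				Alerts:        &securityinsight.AlertsDataTypeOfDataConnectorAlerts{State: securityinsight.Enabled},
				DiscoveryLogs: &securityinsight.MCASDataConnectorDataTypesDiscoveryLogs{State: securityinsight.Enabled},
			},
		},
	}
	body, err := json.Marshal(mcas) // Kind is forced to KindMicrosoftCloudAppSecurity here.
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
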
+// MDATPDataConnector represents MDATP (Microsoft Defender Advanced Threat Protection) data connector.
+type MDATPDataConnector struct {
+ // MDATPDataConnectorProperties - MDATP (Microsoft Defender Advanced Threat Protection) data connector properties.
+ *MDATPDataConnectorProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Etag - Etag of the data connector.
+ Etag *string `json:"etag,omitempty"`
+ // Kind - Possible values include: 'KindDataConnector', 'KindOffice365', 'KindThreatIntelligence', 'KindAmazonWebServicesCloudTrail', 'KindAzureActiveDirectory', 'KindAzureSecurityCenter', 'KindMicrosoftCloudAppSecurity', 'KindAzureAdvancedThreatProtection', 'KindMicrosoftDefenderAdvancedThreatProtection'
+ Kind KindBasicDataConnector `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for MDATPDataConnector.
+func (mdc MDATPDataConnector) MarshalJSON() ([]byte, error) {
+ mdc.Kind = KindMicrosoftDefenderAdvancedThreatProtection
+ objectMap := make(map[string]interface{})
+ if mdc.MDATPDataConnectorProperties != nil {
+ objectMap["properties"] = mdc.MDATPDataConnectorProperties
+ }
+ if mdc.Etag != nil {
+ objectMap["etag"] = mdc.Etag
+ }
+ if mdc.Kind != "" {
+ objectMap["kind"] = mdc.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsOfficeDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
+func (mdc MDATPDataConnector) AsOfficeDataConnector() (*OfficeDataConnector, bool) {
+ return nil, false
+}
+
+// AsTIDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
+func (mdc MDATPDataConnector) AsTIDataConnector() (*TIDataConnector, bool) {
+ return nil, false
+}
+
+// AsAwsCloudTrailDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
+func (mdc MDATPDataConnector) AsAwsCloudTrailDataConnector() (*AwsCloudTrailDataConnector, bool) {
+ return nil, false
+}
+
+// AsAADDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
+func (mdc MDATPDataConnector) AsAADDataConnector() (*AADDataConnector, bool) {
+ return nil, false
+}
+
+// AsASCDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
+func (mdc MDATPDataConnector) AsASCDataConnector() (*ASCDataConnector, bool) {
+ return nil, false
+}
+
+// AsMCASDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
+func (mdc MDATPDataConnector) AsMCASDataConnector() (*MCASDataConnector, bool) {
+ return nil, false
+}
+
+// AsAATPDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
+func (mdc MDATPDataConnector) AsAATPDataConnector() (*AATPDataConnector, bool) {
+ return nil, false
+}
+
+// AsMDATPDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
+func (mdc MDATPDataConnector) AsMDATPDataConnector() (*MDATPDataConnector, bool) {
+ return &mdc, true
+}
+
+// AsDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
+func (mdc MDATPDataConnector) AsDataConnector() (*DataConnector, bool) {
+ return nil, false
+}
+
+// AsBasicDataConnector is the BasicDataConnector implementation for MDATPDataConnector.
+func (mdc MDATPDataConnector) AsBasicDataConnector() (BasicDataConnector, bool) {
+ return &mdc, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for MDATPDataConnector struct.
+func (mdc *MDATPDataConnector) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var mDATPDataConnectorProperties MDATPDataConnectorProperties
+ err = json.Unmarshal(*v, &mDATPDataConnectorProperties)
+ if err != nil {
+ return err
+ }
+ mdc.MDATPDataConnectorProperties = &mDATPDataConnectorProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ mdc.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ mdc.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mdc.Name = &name
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ mdc.Etag = &etag
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicDataConnector
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ mdc.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
+
+// MDATPDataConnectorProperties MDATP (Microsoft Defender Advanced Threat Protection) data connector
+// properties.
+type MDATPDataConnectorProperties struct {
+ // TenantID - The tenant id to connect to, and get the data from.
+ TenantID *string `json:"tenantId,omitempty"`
+ // DataTypes - The available data types for the connector.
+ DataTypes *AlertsDataTypeOfDataConnector `json:"dataTypes,omitempty"`
+}
+
+// OfficeConsent consent for an Office365 tenant that has already been made.
+type OfficeConsent struct {
+ autorest.Response `json:"-"`
+ // OfficeConsentProperties - Office consent properties
+ *OfficeConsentProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for OfficeConsent.
+func (oc OfficeConsent) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if oc.OfficeConsentProperties != nil {
+ objectMap["properties"] = oc.OfficeConsentProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for OfficeConsent struct.
+func (oc *OfficeConsent) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var officeConsentProperties OfficeConsentProperties
+ err = json.Unmarshal(*v, &officeConsentProperties)
+ if err != nil {
+ return err
+ }
+ oc.OfficeConsentProperties = &officeConsentProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ oc.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ oc.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ oc.Name = &name
+ }
+ }
+ }
+
+ return nil
+}
+
+// OfficeConsentList list of all the office365 consents.
+type OfficeConsentList struct {
+ autorest.Response `json:"-"`
+ // NextLink - READ-ONLY; URL to fetch the next set of office consents.
+ NextLink *string `json:"nextLink,omitempty"`
+ // Value - Array of the consents.
+ Value *[]OfficeConsent `json:"value,omitempty"`
+}
+
+// OfficeConsentListIterator provides access to a complete listing of OfficeConsent values.
+type OfficeConsentListIterator struct {
+ i int
+ page OfficeConsentListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *OfficeConsentListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OfficeConsentListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *OfficeConsentListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter OfficeConsentListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter OfficeConsentListIterator) Response() OfficeConsentList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter OfficeConsentListIterator) Value() OfficeConsent {
+ if !iter.page.NotDone() {
+ return OfficeConsent{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the OfficeConsentListIterator type.
+func NewOfficeConsentListIterator(page OfficeConsentListPage) OfficeConsentListIterator {
+ return OfficeConsentListIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (ocl OfficeConsentList) IsEmpty() bool {
+ return ocl.Value == nil || len(*ocl.Value) == 0
+}
+
+// officeConsentListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (ocl OfficeConsentList) officeConsentListPreparer(ctx context.Context) (*http.Request, error) {
+ if ocl.NextLink == nil || len(to.String(ocl.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(ocl.NextLink)))
+}
+
+// OfficeConsentListPage contains a page of OfficeConsent values.
+type OfficeConsentListPage struct {
+ fn func(context.Context, OfficeConsentList) (OfficeConsentList, error)
+ ocl OfficeConsentList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *OfficeConsentListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OfficeConsentListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.ocl)
+ if err != nil {
+ return err
+ }
+ page.ocl = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *OfficeConsentListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page OfficeConsentListPage) NotDone() bool {
+ return !page.ocl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page OfficeConsentListPage) Response() OfficeConsentList {
+ return page.ocl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page OfficeConsentListPage) Values() []OfficeConsent {
+ if page.ocl.IsEmpty() {
+ return nil
+ }
+ return *page.ocl.Value
+}
+
+// Creates a new instance of the OfficeConsentListPage type.
+func NewOfficeConsentListPage(getNextPage func(context.Context, OfficeConsentList) (OfficeConsentList, error)) OfficeConsentListPage {
+ return OfficeConsentListPage{fn: getNextPage}
+}
+
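
The list, iterator, and page types above follow the standard autorest pagination shape. A small sketch of consuming them, assuming the iterator was obtained from the office-consents client (which is not part of this diff):

package example

import (
	"context"
	"fmt"

	// Assumed import path for the generated package touched by this diff.
	securityinsight "github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
)

// printConsentIDs walks every OfficeConsent across all pages; NextWithContext
// fetches the next page transparently once the current one is exhausted.
func printConsentIDs(ctx context.Context, iter securityinsight.OfficeConsentListIterator) error {
	for iter.NotDone() {
		oc := iter.Value()
		if oc.ID != nil {
			fmt.Println(*oc.ID)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
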
+// OfficeConsentProperties consent property bag.
+type OfficeConsentProperties struct {
+ // TenantID - The tenant ID of the Office365 tenant with the consent.
+ TenantID *string `json:"tenantId,omitempty"`
+ // TenantName - READ-ONLY; The tenant name of the Office365 tenant with the consent.
+ TenantName *string `json:"tenantName,omitempty"`
+}
+
+// OfficeDataConnector represents office data connector.
+type OfficeDataConnector struct {
+ // OfficeDataConnectorProperties - Office data connector properties.
+ *OfficeDataConnectorProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Etag - Etag of the data connector.
+ Etag *string `json:"etag,omitempty"`
+ // Kind - Possible values include: 'KindDataConnector', 'KindOffice365', 'KindThreatIntelligence', 'KindAmazonWebServicesCloudTrail', 'KindAzureActiveDirectory', 'KindAzureSecurityCenter', 'KindMicrosoftCloudAppSecurity', 'KindAzureAdvancedThreatProtection', 'KindMicrosoftDefenderAdvancedThreatProtection'
+ Kind KindBasicDataConnector `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for OfficeDataConnector.
+func (odc OfficeDataConnector) MarshalJSON() ([]byte, error) {
+ odc.Kind = KindOffice365
+ objectMap := make(map[string]interface{})
+ if odc.OfficeDataConnectorProperties != nil {
+ objectMap["properties"] = odc.OfficeDataConnectorProperties
+ }
+ if odc.Etag != nil {
+ objectMap["etag"] = odc.Etag
+ }
+ if odc.Kind != "" {
+ objectMap["kind"] = odc.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsOfficeDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
+func (odc OfficeDataConnector) AsOfficeDataConnector() (*OfficeDataConnector, bool) {
+ return &odc, true
+}
+
+// AsTIDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
+func (odc OfficeDataConnector) AsTIDataConnector() (*TIDataConnector, bool) {
+ return nil, false
+}
+
+// AsAwsCloudTrailDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
+func (odc OfficeDataConnector) AsAwsCloudTrailDataConnector() (*AwsCloudTrailDataConnector, bool) {
+ return nil, false
+}
+
+// AsAADDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
+func (odc OfficeDataConnector) AsAADDataConnector() (*AADDataConnector, bool) {
+ return nil, false
+}
+
+// AsASCDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
+func (odc OfficeDataConnector) AsASCDataConnector() (*ASCDataConnector, bool) {
+ return nil, false
+}
+
+// AsMCASDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
+func (odc OfficeDataConnector) AsMCASDataConnector() (*MCASDataConnector, bool) {
+ return nil, false
+}
+
+// AsAATPDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
+func (odc OfficeDataConnector) AsAATPDataConnector() (*AATPDataConnector, bool) {
+ return nil, false
+}
+
+// AsMDATPDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
+func (odc OfficeDataConnector) AsMDATPDataConnector() (*MDATPDataConnector, bool) {
+ return nil, false
+}
+
+// AsDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
+func (odc OfficeDataConnector) AsDataConnector() (*DataConnector, bool) {
+ return nil, false
+}
+
+// AsBasicDataConnector is the BasicDataConnector implementation for OfficeDataConnector.
+func (odc OfficeDataConnector) AsBasicDataConnector() (BasicDataConnector, bool) {
+ return &odc, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for OfficeDataConnector struct.
+func (odc *OfficeDataConnector) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var officeDataConnectorProperties OfficeDataConnectorProperties
+ err = json.Unmarshal(*v, &officeDataConnectorProperties)
+ if err != nil {
+ return err
+ }
+ odc.OfficeDataConnectorProperties = &officeDataConnectorProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ odc.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ odc.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ odc.Name = &name
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ odc.Etag = &etag
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicDataConnector
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ odc.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
+
+// OfficeDataConnectorDataTypes the available data types for office data connector.
+type OfficeDataConnectorDataTypes struct {
+ // SharePoint - SharePoint data type connection.
+ SharePoint *OfficeDataConnectorDataTypesSharePoint `json:"sharePoint,omitempty"`
+ // Exchange - Exchange data type connection.
+ Exchange *OfficeDataConnectorDataTypesExchange `json:"exchange,omitempty"`
+}
+
+// OfficeDataConnectorDataTypesExchange exchange data type connection.
+type OfficeDataConnectorDataTypesExchange struct {
+ // State - Describe whether this data type connection is enabled or not. Possible values include: 'Enabled', 'Disabled'
+ State DataTypeState `json:"state,omitempty"`
+}
+
+// OfficeDataConnectorDataTypesSharePoint sharePoint data type connection.
+type OfficeDataConnectorDataTypesSharePoint struct {
+ // State - Describe whether this data type connection is enabled or not. Possible values include: 'Enabled', 'Disabled'
+ State DataTypeState `json:"state,omitempty"`
+}
+
+// OfficeDataConnectorProperties office data connector properties.
+type OfficeDataConnectorProperties struct {
+ // DataTypes - The available data types for the connector.
+ DataTypes *OfficeDataConnectorDataTypes `json:"dataTypes,omitempty"`
+ // TenantID - The tenant id to connect to, and get the data from.
+ TenantID *string `json:"tenantId,omitempty"`
+}
+
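
For completeness, the Office connector's per-workload toggles follow the same shape; a brief sketch (Enabled/Disabled constant names assumed from the DataTypeState enum, tenant ID a placeholder):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"

	// Assumed import path for the generated package touched by this diff.
	securityinsight "github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
)

func main() {
	odc := securityinsight.OfficeDataConnector{
		OfficeDataConnectorProperties: &securityinsight.OfficeDataConnectorProperties{
			TenantID: to.StringPtr("00000000-0000-0000-0000-000000000000"), // placeholder tenant
			DataTypes: &securityinsight.OfficeDataConnectorDataTypes{
				Exchange:   &securityinsight.OfficeDataConnectorDataTypesExchange{State: securityinsight.Enabled},
				SharePoint: &securityinsight.OfficeDataConnectorDataTypesSharePoint{State: securityinsight.Disabled},
			},
		},
	}
	b, err := json.Marshal(odc) // Kind is stamped as KindOffice365 by the custom marshaler.
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
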
+// Operation operation provided by provider
type Operation struct {
// Name - Name of the operation
Name *string `json:"name,omitempty"`
- // Display - Properties of the operation
- Display *OperationDisplay `json:"display,omitempty"`
+ // Display - Properties of the operation
+ Display *OperationDisplay `json:"display,omitempty"`
+}
+
+// OperationDisplay properties of the operation
+type OperationDisplay struct {
+ // Provider - Provider name
+ Provider *string `json:"provider,omitempty"`
+ // Resource - Resource name
+ Resource *string `json:"resource,omitempty"`
+ // Operation - Operation name
+ Operation *string `json:"operation,omitempty"`
+ // Description - Description of the operation
+ Description *string `json:"description,omitempty"`
+}
+
+// OperationsList lists the operations available in the SecurityInsights RP.
+type OperationsList struct {
+ autorest.Response `json:"-"`
+ // NextLink - URL to fetch the next set of operations.
+ NextLink *string `json:"nextLink,omitempty"`
+ // Value - Array of operations
+ Value *[]Operation `json:"value,omitempty"`
+}
+
+// OperationsListIterator provides access to a complete listing of Operation values.
+type OperationsListIterator struct {
+ i int
+ page OperationsListPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *OperationsListIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsListIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *OperationsListIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter OperationsListIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter OperationsListIterator) Response() OperationsList {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter OperationsListIterator) Value() Operation {
+ if !iter.page.NotDone() {
+ return Operation{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the OperationsListIterator type.
+func NewOperationsListIterator(page OperationsListPage) OperationsListIterator {
+ return OperationsListIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (ol OperationsList) IsEmpty() bool {
+ return ol.Value == nil || len(*ol.Value) == 0
+}
+
+// operationsListPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (ol OperationsList) operationsListPreparer(ctx context.Context) (*http.Request, error) {
+ if ol.NextLink == nil || len(to.String(ol.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(ol.NextLink)))
+}
+
+// OperationsListPage contains a page of Operation values.
+type OperationsListPage struct {
+ fn func(context.Context, OperationsList) (OperationsList, error)
+ ol OperationsList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *OperationsListPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsListPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.ol)
+ if err != nil {
+ return err
+ }
+ page.ol = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *OperationsListPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page OperationsListPage) NotDone() bool {
+ return !page.ol.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page OperationsListPage) Response() OperationsList {
+ return page.ol
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page OperationsListPage) Values() []Operation {
+ if page.ol.IsEmpty() {
+ return nil
+ }
+ return *page.ol.Value
+}
+
+// Creates a new instance of the OperationsListPage type.
+func NewOperationsListPage(getNextPage func(context.Context, OperationsList) (OperationsList, error)) OperationsListPage {
+ return OperationsListPage{fn: getNextPage}
+}
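+
+// The next function is an illustrative sketch added for this review; it is not
+// part of the generated SDK surface. It shows how the OperationsListPage and
+// OperationsListIterator defined above are meant to be consumed: the page is
+// fed by a getNextPage callback and the iterator walks every Operation via
+// NotDone/Value/NextWithContext. The exampleIterateOperations name and the
+// sample OperationDisplay values are assumptions made for the sketch; in the
+// real SDK the first page arrives pre-populated from the operations client's
+// List call rather than from a hand-written callback.
+func exampleIterateOperations(ctx context.Context) ([]Operation, error) {
+	firstPage := OperationsList{
+		Value: &[]Operation{
+			{Display: &OperationDisplay{
+				Provider:  to.StringPtr("Microsoft.SecurityInsights"),
+				Operation: to.StringPtr("read"),
+			}},
+		},
+	}
+	// Serve the seeded first page once, then an empty list to end paging.
+	page := NewOperationsListPage(func(ctx context.Context, cur OperationsList) (OperationsList, error) {
+		if cur.IsEmpty() {
+			return firstPage, nil
+		}
+		return OperationsList{}, nil
+	})
+	// Load the first page before handing the page to the iterator.
+	if err := page.NextWithContext(ctx); err != nil {
+		return nil, err
+	}
+	var all []Operation
+	for iter := NewOperationsListIterator(page); iter.NotDone(); {
+		all = append(all, iter.Value())
+		if err := iter.NextWithContext(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return all, nil
+}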
+
+// ProcessEntity represents a process entity.
+type ProcessEntity struct {
+ // ProcessEntityProperties - Process entity properties
+ *ProcessEntityProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ProcessEntity.
+func (peVar ProcessEntity) MarshalJSON() ([]byte, error) {
+ peVar.Kind = KindProcess
+ objectMap := make(map[string]interface{})
+ if peVar.ProcessEntityProperties != nil {
+ objectMap["properties"] = peVar.ProcessEntityProperties
+ }
+ if peVar.Kind != "" {
+ objectMap["kind"] = peVar.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAccountEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
+}
+
+// AsHostEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsHostEntity() (*HostEntity, bool) {
+ return nil, false
+}
+
+// AsFileEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsFileEntity() (*FileEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityAlert is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
+}
+
+// AsFileHashEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
+}
+
+// AsMalwareEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return &peVar, true
+}
+
+// AsDNSEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for ProcessEntity.
+func (peVar ProcessEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &peVar, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for ProcessEntity struct.
+func (peVar *ProcessEntity) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var processEntityProperties ProcessEntityProperties
+ err = json.Unmarshal(*v, &processEntityProperties)
+ if err != nil {
+ return err
+ }
+ peVar.ProcessEntityProperties = &processEntityProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ peVar.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ peVar.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ peVar.Name = &name
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicEntity
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ peVar.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
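+
+// The next function is an illustrative sketch added for this review; it is not
+// part of the generated SDK surface. It round-trips a ProcessEntity through the
+// custom marshaler/unmarshaler above: MarshalJSON stamps Kind = KindProcess and
+// omits the READ-ONLY ID/Type/Name fields, while UnmarshalJSON folds the nested
+// "properties" object back into ProcessEntityProperties. The As* helpers then
+// act as the discriminated-union accessors, so only the ProcessEntity branch
+// succeeds. The function name is an assumption for the sketch and the choice of
+// ElevationToken Full is arbitrary.
+func exampleProcessEntityRoundTrip() (*ProcessEntity, error) {
+	src := ProcessEntity{
+		ProcessEntityProperties: &ProcessEntityProperties{ElevationToken: Full},
+	}
+	body, err := json.Marshal(src) // ProcessEntity.MarshalJSON
+	if err != nil {
+		return nil, err
+	}
+	var decoded ProcessEntity
+	if err := json.Unmarshal(body, &decoded); err != nil { // ProcessEntity.UnmarshalJSON
+		return nil, err
+	}
+	// Branch on the concrete kind the same way callers holding a BasicEntity do.
+	if _, ok := decoded.AsSecurityAlert(); ok {
+		// Never reached: a ProcessEntity is not the SecurityAlert branch.
+		return nil, nil
+	}
+	pe, _ := decoded.AsProcessEntity() // always succeeds for a ProcessEntity
+	return pe, nil
+}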
+
+// ProcessEntityProperties process entity property bag.
+type ProcessEntityProperties struct {
+ // ProcessID - READ-ONLY; The process ID
+ ProcessID *string `json:"processId,omitempty"`
+ // CommandLine - READ-ONLY; The command line used to create the process
+ CommandLine *string `json:"commandLine,omitempty"`
+ // ElevationToken - The elevation token associated with the process. Possible values include: 'Default', 'Full', 'Limited'
+ ElevationToken ElevationToken `json:"elevationToken,omitempty"`
+ // CreationTimeUtc - READ-ONLY; The time when the process started to run
+ CreationTimeUtc *date.Time `json:"creationTimeUtc,omitempty"`
+ // ImageFileEntityID - READ-ONLY; Image file entity id
+ ImageFileEntityID *string `json:"imageFileEntityId,omitempty"`
+ // AccountEntityID - READ-ONLY; The account entity id running the processes.
+ AccountEntityID *string `json:"accountEntityId,omitempty"`
+ // ParentProcessEntityID - READ-ONLY; The parent process entity id.
+ ParentProcessEntityID *string `json:"parentProcessEntityId,omitempty"`
+ // HostEntityID - READ-ONLY; The host entity id on which the process was running
+ HostEntityID *string `json:"hostEntityId,omitempty"`
+ // HostLogonSessionEntityID - READ-ONLY; The session entity id in which the process was running
+ HostLogonSessionEntityID *string `json:"hostLogonSessionEntityId,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short, human-readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for ProcessEntityProperties.
+func (pep ProcessEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if pep.ElevationToken != "" {
+ objectMap["elevationToken"] = pep.ElevationToken
+ }
+ return json.Marshal(objectMap)
+}
+
+// RegistryKeyEntity represents a registry key entity.
+type RegistryKeyEntity struct {
+ // RegistryKeyEntityProperties - RegistryKey entity properties
+ *RegistryKeyEntityProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for RegistryKeyEntity.
+func (rke RegistryKeyEntity) MarshalJSON() ([]byte, error) {
+ rke.Kind = KindRegistryKey
+ objectMap := make(map[string]interface{})
+ if rke.RegistryKeyEntityProperties != nil {
+ objectMap["properties"] = rke.RegistryKeyEntityProperties
+ }
+ if rke.Kind != "" {
+ objectMap["kind"] = rke.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAccountEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
+}
+
+// AsHostEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsHostEntity() (*HostEntity, bool) {
+ return nil, false
+}
+
+// AsFileEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsFileEntity() (*FileEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityAlert is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
+}
+
+// AsFileHashEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
+}
+
+// AsMalwareEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return &rke, true
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for RegistryKeyEntity.
+func (rke RegistryKeyEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &rke, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for RegistryKeyEntity struct.
+func (rke *RegistryKeyEntity) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var registryKeyEntityProperties RegistryKeyEntityProperties
+ err = json.Unmarshal(*v, &registryKeyEntityProperties)
+ if err != nil {
+ return err
+ }
+ rke.RegistryKeyEntityProperties = &registryKeyEntityProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ rke.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ rke.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ rke.Name = &name
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicEntity
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ rke.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
+
+// RegistryKeyEntityProperties registryKey entity property bag.
+type RegistryKeyEntityProperties struct {
+ // Hive - READ-ONLY; the hive that holds the registry key. Possible values include: 'HKEYLOCALMACHINE', 'HKEYCLASSESROOT', 'HKEYCURRENTCONFIG', 'HKEYUSERS', 'HKEYCURRENTUSERLOCALSETTINGS', 'HKEYPERFORMANCEDATA', 'HKEYPERFORMANCENLSTEXT', 'HKEYPERFORMANCETEXT', 'HKEYA', 'HKEYCURRENTUSER'
+ Hive RegistryHive `json:"hive,omitempty"`
+ // Key - READ-ONLY; The registry key path.
+ Key *string `json:"key,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short, human-readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for RegistryKeyEntityProperties.
+func (rkep RegistryKeyEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// RegistryValueEntity represents a registry value entity.
+type RegistryValueEntity struct {
+ // RegistryValueEntityProperties - RegistryKey entity properties
+ *RegistryValueEntityProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for RegistryValueEntity.
+func (rve RegistryValueEntity) MarshalJSON() ([]byte, error) {
+ rve.Kind = KindRegistryValue
+ objectMap := make(map[string]interface{})
+ if rve.RegistryValueEntityProperties != nil {
+ objectMap["properties"] = rve.RegistryValueEntityProperties
+ }
+ if rve.Kind != "" {
+ objectMap["kind"] = rve.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAccountEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
+}
+
+// AsHostEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsHostEntity() (*HostEntity, bool) {
+ return nil, false
+}
+
+// AsFileEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsFileEntity() (*FileEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityAlert is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
+}
+
+// AsFileHashEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
+}
+
+// AsMalwareEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return &rve, true
+}
+
+// AsURLEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for RegistryValueEntity.
+func (rve RegistryValueEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &rve, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for RegistryValueEntity struct.
+func (rve *RegistryValueEntity) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var registryValueEntityProperties RegistryValueEntityProperties
+ err = json.Unmarshal(*v, &registryValueEntityProperties)
+ if err != nil {
+ return err
+ }
+ rve.RegistryValueEntityProperties = &registryValueEntityProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ rve.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ rve.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ rve.Name = &name
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicEntity
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ rve.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
+
+// RegistryValueEntityProperties registryValue entity property bag.
+type RegistryValueEntityProperties struct {
+ // ValueName - READ-ONLY; The registry value name.
+ ValueName *string `json:"valueName,omitempty"`
+ // ValueData - READ-ONLY; String formatted representation of the value data.
+ ValueData *string `json:"valueData,omitempty"`
+ // ValueType - READ-ONLY; Specifies the data types to use when storing values in the registry, or identifies the data type of a value in the registry. Possible values include: 'RegistryValueKindNone', 'RegistryValueKindUnknown', 'RegistryValueKindString', 'RegistryValueKindExpandString', 'RegistryValueKindBinary', 'RegistryValueKindDWord', 'RegistryValueKindMultiString', 'RegistryValueKindQWord'
+ ValueType RegistryValueKind `json:"valueType,omitempty"`
+ // KeyEntityID - READ-ONLY; The registry key entity id.
+ KeyEntityID *string `json:"keyEntityId,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short, human-readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for RegistryValueEntityProperties.
+func (rvep RegistryValueEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
+// Resource an azure resource object
+type Resource struct {
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+}
+
+// ScheduledAlertRule represents scheduled alert rule.
+type ScheduledAlertRule struct {
+ // ScheduledAlertRuleProperties - Scheduled alert rule properties
+ *ScheduledAlertRuleProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Etag - Etag of the alert rule.
+ Etag *string `json:"etag,omitempty"`
+ // Kind - Possible values include: 'KindAlertRule', 'KindScheduled'
+ Kind Kind `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ScheduledAlertRule.
+func (sar ScheduledAlertRule) MarshalJSON() ([]byte, error) {
+ sar.Kind = KindScheduled
+ objectMap := make(map[string]interface{})
+ if sar.ScheduledAlertRuleProperties != nil {
+ objectMap["properties"] = sar.ScheduledAlertRuleProperties
+ }
+ if sar.Etag != nil {
+ objectMap["etag"] = sar.Etag
+ }
+ if sar.Kind != "" {
+ objectMap["kind"] = sar.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsScheduledAlertRule is the BasicAlertRule implementation for ScheduledAlertRule.
+func (sar ScheduledAlertRule) AsScheduledAlertRule() (*ScheduledAlertRule, bool) {
+ return &sar, true
+}
+
+// AsAlertRule is the BasicAlertRule implementation for ScheduledAlertRule.
+func (sar ScheduledAlertRule) AsAlertRule() (*AlertRule, bool) {
+ return nil, false
+}
+
+// AsBasicAlertRule is the BasicAlertRule implementation for ScheduledAlertRule.
+func (sar ScheduledAlertRule) AsBasicAlertRule() (BasicAlertRule, bool) {
+ return &sar, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for ScheduledAlertRule struct.
+func (sar *ScheduledAlertRule) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var scheduledAlertRuleProperties ScheduledAlertRuleProperties
+ err = json.Unmarshal(*v, &scheduledAlertRuleProperties)
+ if err != nil {
+ return err
+ }
+ sar.ScheduledAlertRuleProperties = &scheduledAlertRuleProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ sar.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sar.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ sar.Name = &name
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ sar.Etag = &etag
+ }
+ case "kind":
+ if v != nil {
+ var kind Kind
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ sar.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
+
+// ScheduledAlertRuleProperties alert rule property bag.
+type ScheduledAlertRuleProperties struct {
+ // DisplayName - The display name for alerts created by this alert rule.
+ DisplayName *string `json:"displayName,omitempty"`
+ // Description - The description of the alert rule.
+ Description *string `json:"description,omitempty"`
+ // Severity - The severity for alerts created by this alert rule. Possible values include: 'High', 'Medium', 'Low', 'Informational'
+ Severity AlertSeverity `json:"severity,omitempty"`
+ // Enabled - Determines whether this alert rule is enabled or disabled.
+ Enabled *bool `json:"enabled,omitempty"`
+ // Query - The query that creates alerts for this rule.
+ Query *string `json:"query,omitempty"`
+ // QueryFrequency - The frequency (in ISO 8601 duration format) for this alert rule to run.
+ QueryFrequency *string `json:"queryFrequency,omitempty"`
+ // QueryPeriod - The period (in ISO 8601 duration format) that this alert rule looks at.
+ QueryPeriod *string `json:"queryPeriod,omitempty"`
+ // TriggerOperator - The operation against the threshold that triggers alert rule. Possible values include: 'GreaterThan', 'LessThan', 'Equal', 'NotEqual'
+ TriggerOperator TriggerOperator `json:"triggerOperator,omitempty"`
+ // TriggerThreshold - The threshold that triggers this alert rule.
+ TriggerThreshold *int32 `json:"triggerThreshold,omitempty"`
+ // SuppressionEnabled - Determines whether the suppression for this alert rule is enabled or disabled.
+ SuppressionEnabled *bool `json:"suppressionEnabled,omitempty"`
+ // SuppressionDuration - The suppression duration (in ISO 8601 duration format) to wait since the last time this alert rule was triggered.
+ SuppressionDuration *string `json:"suppressionDuration,omitempty"`
+ // LastModifiedUtc - READ-ONLY; The last time that this alert has been modified.
+ LastModifiedUtc *string `json:"lastModifiedUtc,omitempty"`
+}
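+
+// The next function is an illustrative sketch added for this review; it is not
+// part of the generated SDK surface. It populates a ScheduledAlertRule for a
+// create/update payload: QueryFrequency, QueryPeriod and SuppressionDuration
+// take ISO 8601 durations (e.g. "PT5H" for five hours), and MarshalJSON above
+// stamps Kind = KindScheduled while dropping the READ-ONLY ID/Type/Name fields.
+// The exampleScheduledAlertRule name, the display name and the sample query
+// string are assumptions made for the sketch.
+func exampleScheduledAlertRule() ([]byte, error) {
+	rule := ScheduledAlertRule{
+		ScheduledAlertRuleProperties: &ScheduledAlertRuleProperties{
+			DisplayName:         to.StringPtr("Excessive sign-in failures"),
+			Description:         to.StringPtr("Alerts when sign-in failures exceed the threshold."),
+			Severity:            High,
+			Enabled:             to.BoolPtr(true),
+			Query:               to.StringPtr("SigninLogs | where ResultType != 0"),
+			QueryFrequency:      to.StringPtr("PT5H"),
+			QueryPeriod:         to.StringPtr("PT5H"),
+			TriggerOperator:     GreaterThan,
+			TriggerThreshold:    to.Int32Ptr(5),
+			SuppressionEnabled:  to.BoolPtr(false),
+			SuppressionDuration: to.StringPtr("PT1H"),
+		},
+	}
+	// Kind is stamped to KindScheduled by the custom marshaler above.
+	return json.Marshal(rule)
+}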
+
+// ScheduledAlertRuleTemplate represents scheduled alert rule template.
+type ScheduledAlertRuleTemplate struct {
+ // ScheduledAlertRuleTemplateProperties - Scheduled alert rule template properties
+ *ScheduledAlertRuleTemplateProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Etag - Etag of the alert rule.
+ Etag *string `json:"etag,omitempty"`
+ // Kind - Possible values include: 'KindBasicAlertRuleTemplateKindAlertRuleTemplate', 'KindBasicAlertRuleTemplateKindScheduled', 'KindBasicAlertRuleTemplateKindFilter', 'KindBasicAlertRuleTemplateKindFusion'
+ Kind KindBasicAlertRuleTemplate `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ScheduledAlertRuleTemplate.
+func (sart ScheduledAlertRuleTemplate) MarshalJSON() ([]byte, error) {
+ sart.Kind = KindBasicAlertRuleTemplateKindScheduled
+ objectMap := make(map[string]interface{})
+ if sart.ScheduledAlertRuleTemplateProperties != nil {
+ objectMap["properties"] = sart.ScheduledAlertRuleTemplateProperties
+ }
+ if sart.Etag != nil {
+ objectMap["etag"] = sart.Etag
+ }
+ if sart.Kind != "" {
+ objectMap["kind"] = sart.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsScheduledAlertRuleTemplate is the BasicAlertRuleTemplate implementation for ScheduledAlertRuleTemplate.
+func (sart ScheduledAlertRuleTemplate) AsScheduledAlertRuleTemplate() (*ScheduledAlertRuleTemplate, bool) {
+ return &sart, true
+}
+
+// AsFilterAlertRuleTemplate is the BasicAlertRuleTemplate implementation for ScheduledAlertRuleTemplate.
+func (sart ScheduledAlertRuleTemplate) AsFilterAlertRuleTemplate() (*FilterAlertRuleTemplate, bool) {
+ return nil, false
+}
+
+// AsFusionAlertRuleTemplate is the BasicAlertRuleTemplate implementation for ScheduledAlertRuleTemplate.
+func (sart ScheduledAlertRuleTemplate) AsFusionAlertRuleTemplate() (*FusionAlertRuleTemplate, bool) {
+ return nil, false
+}
+
+// AsAlertRuleTemplate is the BasicAlertRuleTemplate implementation for ScheduledAlertRuleTemplate.
+func (sart ScheduledAlertRuleTemplate) AsAlertRuleTemplate() (*AlertRuleTemplate, bool) {
+ return nil, false
+}
+
+// AsBasicAlertRuleTemplate is the BasicAlertRuleTemplate implementation for ScheduledAlertRuleTemplate.
+func (sart ScheduledAlertRuleTemplate) AsBasicAlertRuleTemplate() (BasicAlertRuleTemplate, bool) {
+ return &sart, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for ScheduledAlertRuleTemplate struct.
+func (sart *ScheduledAlertRuleTemplate) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var scheduledAlertRuleTemplateProperties ScheduledAlertRuleTemplateProperties
+ err = json.Unmarshal(*v, &scheduledAlertRuleTemplateProperties)
+ if err != nil {
+ return err
+ }
+ sart.ScheduledAlertRuleTemplateProperties = &scheduledAlertRuleTemplateProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ sart.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sart.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ sart.Name = &name
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ sart.Etag = &etag
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicAlertRuleTemplate
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ sart.Kind = kind
+ }
+ }
+ }
+
+ return nil
}
-// OperationDisplay properties of the operation
-type OperationDisplay struct {
- // Provider - Provider name
- Provider *string `json:"provider,omitempty"`
- // Resource - Resource name
- Resource *string `json:"resource,omitempty"`
- // Operation - Operation name
- Operation *string `json:"operation,omitempty"`
- // Description - Description of the operation
+// ScheduledAlertRuleTemplateProperties scheduled alert rule template properties
+type ScheduledAlertRuleTemplateProperties struct {
+ // DisplayName - The display name for alert rule template.
+ DisplayName *string `json:"displayName,omitempty"`
+ // Description - The description of the alert rule template.
Description *string `json:"description,omitempty"`
+ // Tactics - The tactics of the alert rule template
+ Tactics *[]AttackTactic `json:"tactics,omitempty"`
+ // CreatedDateUTC - READ-ONLY; The time that this alert rule template has been added.
+ CreatedDateUTC *string `json:"createdDateUTC,omitempty"`
+ // Status - The alert rule template status. Possible values include: 'Installed', 'Available', 'NotAvailable'
+ Status TemplateStatus `json:"status,omitempty"`
+ // RequiredDataConnectors - The required data connectors for this template
+ RequiredDataConnectors *[]DataConnectorStatus `json:"requiredDataConnectors,omitempty"`
+ // AlertRulesCreatedByTemplateCount - the number of alert rules that were created by this template
+ AlertRulesCreatedByTemplateCount *int32 `json:"alertRulesCreatedByTemplateCount,omitempty"`
+ // Severity - The severity for alerts created by this alert rule. Possible values include: 'High', 'Medium', 'Low', 'Informational'
+ Severity AlertSeverity `json:"severity,omitempty"`
+ // Query - The query that creates alerts for this rule.
+ Query *string `json:"query,omitempty"`
+ // QueryFrequency - The frequency (in ISO 8601 duration format) for this alert rule to run.
+ QueryFrequency *string `json:"queryFrequency,omitempty"`
+ // QueryPeriod - The period (in ISO 8601 duration format) that this alert rule looks at.
+ QueryPeriod *string `json:"queryPeriod,omitempty"`
+ // TriggerOperator - The operation against the threshold that triggers alert rule. Possible values include: 'GreaterThan', 'LessThan', 'Equal', 'NotEqual'
+ TriggerOperator TriggerOperator `json:"triggerOperator,omitempty"`
+ // TriggerThreshold - The threshold that triggers this alert rule.
+ TriggerThreshold *int32 `json:"triggerThreshold,omitempty"`
}
-// OperationsList lists the operations available in the SecurityInsights RP.
-type OperationsList struct {
- autorest.Response `json:"-"`
- // NextLink - URL to fetch the next set of operations.
- NextLink *string `json:"nextLink,omitempty"`
- // Value - Array of operations
- Value *[]Operation `json:"value,omitempty"`
+// ScheduledAlertRuleTemplatePropertiesModel schedule alert rule template property bag.
+type ScheduledAlertRuleTemplatePropertiesModel struct {
+ // Severity - The severity for alerts created by this alert rule. Possible values include: 'High', 'Medium', 'Low', 'Informational'
+ Severity AlertSeverity `json:"severity,omitempty"`
+ // Query - The query that creates alerts for this rule.
+ Query *string `json:"query,omitempty"`
+ // QueryFrequency - The frequency (in ISO 8601 duration format) for this alert rule to run.
+ QueryFrequency *string `json:"queryFrequency,omitempty"`
+ // QueryPeriod - The period (in ISO 8601 duration format) that this alert rule looks at.
+ QueryPeriod *string `json:"queryPeriod,omitempty"`
+ // TriggerOperator - The operation against the threshold that triggers alert rule. Possible values include: 'GreaterThan', 'LessThan', 'Equal', 'NotEqual'
+ TriggerOperator TriggerOperator `json:"triggerOperator,omitempty"`
+ // TriggerThreshold - The threshold that triggers this alert rule.
+ TriggerThreshold *int32 `json:"triggerThreshold,omitempty"`
}
-// OperationsListIterator provides access to a complete listing of Operation values.
-type OperationsListIterator struct {
- i int
- page OperationsListPage
+// SecurityAlert represents a security alert entity.
+type SecurityAlert struct {
+ // SecurityAlertProperties - SecurityAlert entity properties
+ *SecurityAlertProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
}
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *OperationsListIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/OperationsListIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
+// MarshalJSON is the custom marshaler for SecurityAlert.
+func (sa SecurityAlert) MarshalJSON() ([]byte, error) {
+ sa.Kind = KindSecurityAlert
+ objectMap := make(map[string]interface{})
+ if sa.SecurityAlertProperties != nil {
+ objectMap["properties"] = sa.SecurityAlertProperties
}
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
+ if sa.Kind != "" {
+ objectMap["kind"] = sa.Kind
}
- iter.i = 0
- return nil
+ return json.Marshal(objectMap)
}
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *OperationsListIterator) Next() error {
- return iter.NextWithContext(context.Background())
+// AsAccountEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
}
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter OperationsListIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
+// AsHostEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsHostEntity() (*HostEntity, bool) {
+ return nil, false
}
-// Response returns the raw server response from the last page request.
-func (iter OperationsListIterator) Response() OperationsList {
- return iter.page.Response()
+// AsFileEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsFileEntity() (*FileEntity, bool) {
+ return nil, false
}
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter OperationsListIterator) Value() Operation {
- if !iter.page.NotDone() {
- return Operation{}
- }
- return iter.page.Values()[iter.i]
+// AsSecurityAlert is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsSecurityAlert() (*SecurityAlert, bool) {
+ return &sa, true
}
-// Creates a new instance of the OperationsListIterator type.
-func NewOperationsListIterator(page OperationsListPage) OperationsListIterator {
- return OperationsListIterator{page: page}
+// AsFileHashEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
}
-// IsEmpty returns true if the ListResult contains no values.
-func (ol OperationsList) IsEmpty() bool {
- return ol.Value == nil || len(*ol.Value) == 0
+// AsMalwareEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
}
-// operationsListPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (ol OperationsList) operationsListPreparer(ctx context.Context) (*http.Request, error) {
- if ol.NextLink == nil || len(to.String(ol.NextLink)) < 1 {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(ol.NextLink)))
+// AsSecurityGroupEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
}
-// OperationsListPage contains a page of Operation values.
-type OperationsListPage struct {
- fn func(context.Context, OperationsList) (OperationsList, error)
- ol OperationsList
+// AsAzureResourceEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
}
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *OperationsListPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/OperationsListPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- next, err := page.fn(ctx, page.ol)
- if err != nil {
- return err
- }
- page.ol = next
- return nil
+// AsCloudApplicationEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
}
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *OperationsListPage) Next() error {
- return page.NextWithContext(context.Background())
+// AsProcessEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
}
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page OperationsListPage) NotDone() bool {
- return !page.ol.IsEmpty()
+// AsDNSEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
}
-// Response returns the raw server response from the last page request.
-func (page OperationsListPage) Response() OperationsList {
- return page.ol
+// AsIPEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
}
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page OperationsListPage) Values() []Operation {
- if page.ol.IsEmpty() {
- return nil
+// AsRegistryKeyEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for SecurityAlert.
+func (sa SecurityAlert) AsBasicEntity() (BasicEntity, bool) {
+ return &sa, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for SecurityAlert struct.
+func (sa *SecurityAlert) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
}
- return *page.ol.Value
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var securityAlertProperties SecurityAlertProperties
+ err = json.Unmarshal(*v, &securityAlertProperties)
+ if err != nil {
+ return err
+ }
+ sa.SecurityAlertProperties = &securityAlertProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ sa.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sa.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ sa.Name = &name
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicEntity
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ sa.Kind = kind
+ }
+ }
+ }
+
+ return nil
}
-// Creates a new instance of the OperationsListPage type.
-func NewOperationsListPage(getNextPage func(context.Context, OperationsList) (OperationsList, error)) OperationsListPage {
- return OperationsListPage{fn: getNextPage}
+// SecurityAlertProperties securityAlert entity property bag.
+type SecurityAlertProperties struct {
+ // SystemAlertID - READ-ONLY; Holds the product identifier of the alert for the product.
+ SystemAlertID *string `json:"systemAlertId,omitempty"`
+ // ConfidenceReasons - READ-ONLY; The confidence reasons
+ ConfidenceReasons *[]SecurityAlertPropertiesConfidenceReasonsItem `json:"confidenceReasons,omitempty"`
+ // ConfidenceScoreStatus - READ-ONLY; The confidence score calculation status, i.e. whether the score calculation is pending for this alert, not applicable, or final. Possible values include: 'NotApplicable', 'InProcess', 'NotFinal', 'Final'
+ ConfidenceScoreStatus ConfidenceScoreStatus `json:"confidenceScoreStatus,omitempty"`
+ // Intent - READ-ONLY; Holds the alert intent stage(s) mapping for this alert. Possible values include: 'KillChainIntentUnknown', 'KillChainIntentProbing', 'KillChainIntentExploitation', 'KillChainIntentPersistence', 'KillChainIntentPrivilegeEscalation', 'KillChainIntentDefenseEvasion', 'KillChainIntentCredentialAccess', 'KillChainIntentDiscovery', 'KillChainIntentLateralMovement', 'KillChainIntentExecution', 'KillChainIntentCollection', 'KillChainIntentExfiltration', 'KillChainIntentCommandAndControl', 'KillChainIntentImpact'
+ Intent KillChainIntent `json:"intent,omitempty"`
+ // ConfidenceScore - READ-ONLY; The confidence score of the alert.
+ ConfidenceScore *float64 `json:"confidenceScore,omitempty"`
+ // AlertDisplayName - READ-ONLY; The display name of the alert.
+ AlertDisplayName *string `json:"alertDisplayName,omitempty"`
+ // Description - READ-ONLY; Alert description.
+ Description *string `json:"description,omitempty"`
+ // RemediationSteps - READ-ONLY; Manual action items to take to remediate the alert.
+ RemediationSteps *[]string `json:"remediationSteps,omitempty"`
+ // ConfidenceLevel - READ-ONLY; The confidence level of this alert. Possible values include: 'ConfidenceLevelUnknown', 'ConfidenceLevelLow', 'ConfidenceLevelHigh'
+ ConfidenceLevel ConfidenceLevel `json:"confidenceLevel,omitempty"`
+ // Severity - The severity of the alert. Possible values include: 'High', 'Medium', 'Low', 'Informational'
+ Severity AlertSeverity `json:"severity,omitempty"`
+ // VendorName - READ-ONLY; The name of the vendor that raises the alert.
+ VendorName *string `json:"vendorName,omitempty"`
+ // ProductName - READ-ONLY; The name of the product which published this alert.
+ ProductName *string `json:"productName,omitempty"`
+ // ProductComponentName - READ-ONLY; The name of a component inside the product which generated the alert.
+ ProductComponentName *string `json:"productComponentName,omitempty"`
+ // AlertType - READ-ONLY; The type name of the alert.
+ AlertType *string `json:"alertType,omitempty"`
+ // ProductVersion - READ-ONLY; The version of the product generating the alert.
+ ProductVersion *string `json:"productVersion,omitempty"`
+ // ProcessingEndTime - READ-ONLY; The time the alert was made available for consumption.
+ ProcessingEndTime *date.Time `json:"processingEndTime,omitempty"`
+ // Status - READ-ONLY; The lifecycle status of the alert. Possible values include: 'AlertStatusUnknown', 'AlertStatusNew', 'AlertStatusResolved', 'AlertStatusDismissed', 'AlertStatusInProgress'
+ Status AlertStatus `json:"status,omitempty"`
+ // EndTimeUtc - READ-ONLY; The impact end time of the alert (the time of the last event contributing to the alert).
+ EndTimeUtc *date.Time `json:"endTimeUtc,omitempty"`
+ // StartTimeUtc - READ-ONLY; The impact start time of the alert (the time of the first event contributing to the alert).
+ StartTimeUtc *date.Time `json:"startTimeUtc,omitempty"`
+ // TimeGenerated - READ-ONLY; The time the alert was generated.
+ TimeGenerated *date.Time `json:"timeGenerated,omitempty"`
+ // CompromisedEntity - READ-ONLY; Display name of the main entity being reported on.
+ CompromisedEntity *string `json:"compromisedEntity,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short, human-readable description of the graph item instance. This property is optional and might be system generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for SecurityAlertProperties.
+func (sap SecurityAlertProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if sap.Severity != "" {
+ objectMap["severity"] = sap.Severity
+ }
+ return json.Marshal(objectMap)
}
-// Resource an azure resource object
-type Resource struct {
- // ID - READ-ONLY; Azure resource Id
- ID *string `json:"id,omitempty"`
- // Type - READ-ONLY; Azure resource type
- Type *string `json:"type,omitempty"`
- // Name - READ-ONLY; Azure resource name
- Name *string `json:"name,omitempty"`
+// SecurityAlertPropertiesConfidenceReasonsItem confidence reason item
+type SecurityAlertPropertiesConfidenceReasonsItem struct {
+ // ReasonType - READ-ONLY; The type (category) of the reason
+ ReasonType *string `json:"reasonType,omitempty"`
+ // Reason - READ-ONLY; The reason's description
+ Reason *string `json:"reason,omitempty"`
}
-// ScheduledAlertRule represents scheduled alert rule.
-type ScheduledAlertRule struct {
- // ScheduledAlertRuleProperties - Scheduled alert rule properties
- *ScheduledAlertRuleProperties `json:"properties,omitempty"`
+// SecurityGroupEntity represents a security group entity.
+type SecurityGroupEntity struct {
+ // SecurityGroupEntityProperties - SecurityGroup entity properties
+ *SecurityGroupEntityProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
// Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Etag - Etag of the alert rule.
- Etag *string `json:"etag,omitempty"`
- // Kind - Possible values include: 'KindAlertRule', 'KindScheduled'
- Kind Kind `json:"kind,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
}
-// MarshalJSON is the custom marshaler for ScheduledAlertRule.
-func (sar ScheduledAlertRule) MarshalJSON() ([]byte, error) {
- sar.Kind = KindScheduled
+// MarshalJSON is the custom marshaler for SecurityGroupEntity.
+func (sge SecurityGroupEntity) MarshalJSON() ([]byte, error) {
+ sge.Kind = KindSecurityGroup
objectMap := make(map[string]interface{})
- if sar.ScheduledAlertRuleProperties != nil {
- objectMap["properties"] = sar.ScheduledAlertRuleProperties
- }
- if sar.Etag != nil {
- objectMap["etag"] = sar.Etag
+ if sge.SecurityGroupEntityProperties != nil {
+ objectMap["properties"] = sge.SecurityGroupEntityProperties
}
- if sar.Kind != "" {
- objectMap["kind"] = sar.Kind
+ if sge.Kind != "" {
+ objectMap["kind"] = sge.Kind
}
return json.Marshal(objectMap)
}
-// AsScheduledAlertRule is the BasicAlertRule implementation for ScheduledAlertRule.
-func (sar ScheduledAlertRule) AsScheduledAlertRule() (*ScheduledAlertRule, bool) {
- return &sar, true
+// AsAccountEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
}
-// AsAlertRule is the BasicAlertRule implementation for ScheduledAlertRule.
-func (sar ScheduledAlertRule) AsAlertRule() (*AlertRule, bool) {
+// AsHostEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsHostEntity() (*HostEntity, bool) {
return nil, false
}
-// AsBasicAlertRule is the BasicAlertRule implementation for ScheduledAlertRule.
-func (sar ScheduledAlertRule) AsBasicAlertRule() (BasicAlertRule, bool) {
- return &sar, true
+// AsFileEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsFileEntity() (*FileEntity, bool) {
+ return nil, false
}
-// UnmarshalJSON is the custom unmarshaler for ScheduledAlertRule struct.
-func (sar *ScheduledAlertRule) UnmarshalJSON(body []byte) error {
+// AsSecurityAlert is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
+}
+
+// AsFileHashEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
+}
+
+// AsMalwareEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return &sge, true
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsURLEntity() (*URLEntity, bool) {
+ return nil, false
+}
+
+// AsEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for SecurityGroupEntity.
+func (sge SecurityGroupEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &sge, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for SecurityGroupEntity struct.
+func (sge *SecurityGroupEntity) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
@@ -4630,12 +8743,12 @@ func (sar *ScheduledAlertRule) UnmarshalJSON(body []byte) error {
switch k {
case "properties":
if v != nil {
- var scheduledAlertRuleProperties ScheduledAlertRuleProperties
- err = json.Unmarshal(*v, &scheduledAlertRuleProperties)
+ var securityGroupEntityProperties SecurityGroupEntityProperties
+ err = json.Unmarshal(*v, &securityGroupEntityProperties)
if err != nil {
return err
}
- sar.ScheduledAlertRuleProperties = &scheduledAlertRuleProperties
+ sge.SecurityGroupEntityProperties = &securityGroupEntityProperties
}
case "id":
if v != nil {
@@ -4644,7 +8757,7 @@ func (sar *ScheduledAlertRule) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- sar.ID = &ID
+ sge.ID = &ID
}
case "type":
if v != nil {
@@ -4653,7 +8766,7 @@ func (sar *ScheduledAlertRule) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- sar.Type = &typeVar
+ sge.Type = &typeVar
}
case "name":
if v != nil {
@@ -4662,25 +8775,16 @@ func (sar *ScheduledAlertRule) UnmarshalJSON(body []byte) error {
if err != nil {
return err
}
- sar.Name = &name
- }
- case "etag":
- if v != nil {
- var etag string
- err = json.Unmarshal(*v, &etag)
- if err != nil {
- return err
- }
- sar.Etag = &etag
+ sge.Name = &name
}
case "kind":
if v != nil {
- var kind Kind
+ var kind KindBasicEntity
err = json.Unmarshal(*v, &kind)
if err != nil {
return err
}
- sar.Kind = kind
+ sge.Kind = kind
}
}
}
@@ -4688,32 +8792,24 @@ func (sar *ScheduledAlertRule) UnmarshalJSON(body []byte) error {
return nil
}
-// ScheduledAlertRuleProperties alert rule property bag.
-type ScheduledAlertRuleProperties struct {
- // DisplayName - The display name for alerts created by this alert rule.
- DisplayName *string `json:"displayName,omitempty"`
- // Description - The description of the alert rule.
- Description *string `json:"description,omitempty"`
- // Severity - The severity for alerts created by this alert rule. Possible values include: 'High', 'Medium', 'Low', 'Informational'
- Severity AlertSeverity `json:"severity,omitempty"`
- // Enabled - Determines whether this alert rule is enabled or disabled.
- Enabled *bool `json:"enabled,omitempty"`
- // Query - The query that creates alerts for this rule.
- Query *string `json:"query,omitempty"`
- // QueryFrequency - The frequency (in ISO 8601 duration format) for this alert rule to run.
- QueryFrequency *string `json:"queryFrequency,omitempty"`
- // QueryPeriod - The period (in ISO 8601 duration format) that this alert rule looks at.
- QueryPeriod *string `json:"queryPeriod,omitempty"`
- // TriggerOperator - The operation against the threshold that triggers alert rule. Possible values include: 'GreaterThan', 'LessThan', 'Equal', 'NotEqual'
- TriggerOperator TriggerOperator `json:"triggerOperator,omitempty"`
- // TriggerThreshold - The threshold triggers this alert rule.
- TriggerThreshold *int32 `json:"triggerThreshold,omitempty"`
- // SuppressionEnabled - Determines whether the suppression for this alert rule is enabled or disabled.
- SuppressionEnabled *bool `json:"suppressionEnabled,omitempty"`
- // SuppressionDuration - The suppression (in ISO 8601 duration format) to wait since last time this alert rule been triggered.
- SuppressionDuration *string `json:"suppressionDuration,omitempty"`
- // LastModifiedUtc - READ-ONLY; The last time that this alert has been modified.
- LastModifiedUtc *string `json:"lastModifiedUtc,omitempty"`
+// SecurityGroupEntityProperties securityGroup entity property bag.
+type SecurityGroupEntityProperties struct {
+ // DistinguishedName - READ-ONLY; The group distinguished name
+ DistinguishedName *string `json:"distinguishedName,omitempty"`
+ // Sid - READ-ONLY; The SID attribute is a single-value attribute that specifies the security identifier (SID) of the group
+ Sid *string `json:"sid,omitempty"`
+ // ObjectGUID - READ-ONLY; A single-value attribute that is the unique identifier for the object, assigned by Active Directory.
+ ObjectGUID *uuid.UUID `json:"objectGuid,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short, human-readable description of the graph item instance. This property is optional and might be system-generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for SecurityGroupEntityProperties.
+func (sgep SecurityGroupEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
}
// BasicSettings the Setting.
@@ -4835,6 +8931,22 @@ func (sm *SettingsModel) UnmarshalJSON(body []byte) error {
return nil
}
+// ThreatIntelligence threatIntelligence property bag.
+type ThreatIntelligence struct {
+ // ProviderName - READ-ONLY; Name of the provider from whom this Threat Intelligence information was received
+ ProviderName *string `json:"providerName,omitempty"`
+ // ThreatType - READ-ONLY; Threat type (e.g. "Botnet")
+ ThreatType *string `json:"threatType,omitempty"`
+ // ThreatName - READ-ONLY; Threat name (e.g. "Jedobot malware")
+ ThreatName *string `json:"threatName,omitempty"`
+ // Confidence - READ-ONLY; Confidence (must be between 0 and 1)
+ Confidence *float64 `json:"confidence,omitempty"`
+ // ReportLink - READ-ONLY; Report link
+ ReportLink *string `json:"reportLink,omitempty"`
+ // ThreatDescription - READ-ONLY; Threat description (free text)
+ ThreatDescription *string `json:"threatDescription,omitempty"`
+}
+
// TIDataConnector represents threat intelligence data connector.
type TIDataConnector struct {
// TIDataConnectorProperties - TI (Threat Intelligence) data connector properties.
@@ -5264,12 +9376,200 @@ type UebaSettingsProperties struct {
AtpLicenseStatus LicenseStatus `json:"atpLicenseStatus,omitempty"`
}
+// URLEntity represents a URL entity.
+type URLEntity struct {
+ // URLEntityProperties - Url entity properties
+ *URLEntityProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource Id
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; Azure resource type
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; Azure resource name
+ Name *string `json:"name,omitempty"`
+ // Kind - Possible values include: 'KindEntity', 'KindAccount', 'KindHost', 'KindFile', 'KindSecurityAlert', 'KindFileHash', 'KindMalware', 'KindSecurityGroup', 'KindAzureResource', 'KindCloudApplication', 'KindProcess', 'KindDNSResolution', 'KindIP', 'KindRegistryKey', 'KindRegistryValue', 'KindURL'
+ Kind KindBasicEntity `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for URLEntity.
+func (ue URLEntity) MarshalJSON() ([]byte, error) {
+ ue.Kind = KindURL
+ objectMap := make(map[string]interface{})
+ if ue.URLEntityProperties != nil {
+ objectMap["properties"] = ue.URLEntityProperties
+ }
+ if ue.Kind != "" {
+ objectMap["kind"] = ue.Kind
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAccountEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsAccountEntity() (*AccountEntity, bool) {
+ return nil, false
+}
+
+// AsHostEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsHostEntity() (*HostEntity, bool) {
+ return nil, false
+}
+
+// AsFileEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsFileEntity() (*FileEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityAlert is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsSecurityAlert() (*SecurityAlert, bool) {
+ return nil, false
+}
+
+// AsFileHashEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsFileHashEntity() (*FileHashEntity, bool) {
+ return nil, false
+}
+
+// AsMalwareEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsMalwareEntity() (*MalwareEntity, bool) {
+ return nil, false
+}
+
+// AsSecurityGroupEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsSecurityGroupEntity() (*SecurityGroupEntity, bool) {
+ return nil, false
+}
+
+// AsAzureResourceEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsAzureResourceEntity() (*AzureResourceEntity, bool) {
+ return nil, false
+}
+
+// AsCloudApplicationEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsCloudApplicationEntity() (*CloudApplicationEntity, bool) {
+ return nil, false
+}
+
+// AsProcessEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsProcessEntity() (*ProcessEntity, bool) {
+ return nil, false
+}
+
+// AsDNSEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsDNSEntity() (*DNSEntity, bool) {
+ return nil, false
+}
+
+// AsIPEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsIPEntity() (*IPEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryKeyEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsRegistryKeyEntity() (*RegistryKeyEntity, bool) {
+ return nil, false
+}
+
+// AsRegistryValueEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsRegistryValueEntity() (*RegistryValueEntity, bool) {
+ return nil, false
+}
+
+// AsURLEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsURLEntity() (*URLEntity, bool) {
+ return &ue, true
+}
+
+// AsEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsEntity() (*Entity, bool) {
+ return nil, false
+}
+
+// AsBasicEntity is the BasicEntity implementation for URLEntity.
+func (ue URLEntity) AsBasicEntity() (BasicEntity, bool) {
+ return &ue, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for URLEntity struct.
+func (ue *URLEntity) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var URLEntityProperties URLEntityProperties
+ err = json.Unmarshal(*v, &URLEntityProperties)
+ if err != nil {
+ return err
+ }
+ ue.URLEntityProperties = &URLEntityProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ ue.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ue.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ue.Name = &name
+ }
+ case "kind":
+ if v != nil {
+ var kind KindBasicEntity
+ err = json.Unmarshal(*v, &kind)
+ if err != nil {
+ return err
+ }
+ ue.Kind = kind
+ }
+ }
+ }
+
+ return nil
+}
+
+// URLEntityProperties URL entity property bag.
+type URLEntityProperties struct {
+ // URL - READ-ONLY; A full URL the entity points to
+ URL *string `json:"url,omitempty"`
+ // FriendlyName - READ-ONLY; The graph item display name, which is a short, human-readable description of the graph item instance. This property is optional and might be system-generated.
+ FriendlyName *string `json:"friendlyName,omitempty"`
+ // AdditionalData - READ-ONLY; A bag of custom fields that should be part of the entity and will be presented to the user.
+ AdditionalData map[string]interface{} `json:"additionalData"`
+}
+
+// MarshalJSON is the custom marshaler for URLEntityProperties.
+func (uep URLEntityProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
// UserInfo user information that made some action
type UserInfo struct {
// ObjectID - The object id of the user.
ObjectID *uuid.UUID `json:"objectId,omitempty"`
- // Email - The email of the user.
+ // Email - READ-ONLY; The email of the user.
Email *string `json:"email,omitempty"`
- // Name - The name of the user.
+ // Name - READ-ONLY; The name of the user.
Name *string `json:"name,omitempty"`
}
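
The block above completes the new entity model surface in the securityinsight package: each concrete entity (URLEntity, SecurityGroupEntity, and so on) carries a Kind discriminator plus a full set of generated As* helpers, so callers can recover the concrete type behind the BasicEntity interface without a raw type switch. Below is a minimal sketch, not part of the SDK, of how a caller might use those helpers; the import path is inferred from the file paths in this diff, and setting the READ-ONLY URL property locally is only for illustration (the service normally populates it).

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
)

// describeEntity recovers the concrete type behind a BasicEntity using the
// generated As* helpers declared above.
func describeEntity(e securityinsight.BasicEntity) string {
	if u, ok := e.AsURLEntity(); ok && u.URLEntityProperties != nil && u.URL != nil {
		return "URL entity: " + *u.URL
	}
	if g, ok := e.AsSecurityGroupEntity(); ok && g.SecurityGroupEntityProperties != nil && g.DistinguishedName != nil {
		return "security group: " + *g.DistinguishedName
	}
	return "unhandled entity kind"
}

func main() {
	u := "https://contoso.example/badlink"
	// URLEntity has value receivers, so the value itself satisfies BasicEntity.
	entity := securityinsight.URLEntity{
		Kind:                securityinsight.KindURL,
		URLEntityProperties: &securityinsight.URLEntityProperties{URL: &u},
	}
	fmt.Println(describeEntity(entity))
}
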
diff --git a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/securityinsightapi/interfaces.go b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/securityinsightapi/interfaces.go
index 80cadcd82ce4..854841ce503f 100644
--- a/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/securityinsightapi/interfaces.go
+++ b/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight/securityinsightapi/interfaces.go
@@ -50,16 +50,39 @@ type ActionsClientAPI interface {
var _ ActionsClientAPI = (*securityinsight.ActionsClient)(nil)
+// AlertRuleTemplatesClientAPI contains the set of methods on the AlertRuleTemplatesClient type.
+type AlertRuleTemplatesClientAPI interface {
+ Get(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, alertRuleTemplateID string) (result securityinsight.AlertRuleTemplateModel, err error)
+ List(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string) (result securityinsight.AlertRuleTemplatesListPage, err error)
+}
+
+var _ AlertRuleTemplatesClientAPI = (*securityinsight.AlertRuleTemplatesClient)(nil)
+
// CasesClientAPI contains the set of methods on the CasesClient type.
type CasesClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string, caseParameter securityinsight.Case) (result securityinsight.Case, err error)
Delete(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string) (result autorest.Response, err error)
Get(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string) (result securityinsight.Case, err error)
- List(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string) (result securityinsight.CaseListPage, err error)
+ GetComment(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string, caseCommentID string) (result securityinsight.CaseComment, err error)
+ List(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, filter string, orderby string, top *int32, skipToken string) (result securityinsight.CaseListPage, err error)
}
var _ CasesClientAPI = (*securityinsight.CasesClient)(nil)
+// CommentsClientAPI contains the set of methods on the CommentsClient type.
+type CommentsClientAPI interface {
+ ListByCase(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string, filter string, orderby string, top *int32, skipToken string) (result securityinsight.CaseCommentListPage, err error)
+}
+
+var _ CommentsClientAPI = (*securityinsight.CommentsClient)(nil)
+
+// CaseCommentsClientAPI contains the set of methods on the CaseCommentsClient type.
+type CaseCommentsClientAPI interface {
+ CreateComment(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, caseID string, caseCommentID string, caseComment securityinsight.CaseComment) (result securityinsight.CaseComment, err error)
+}
+
+var _ CaseCommentsClientAPI = (*securityinsight.CaseCommentsClient)(nil)
+
// BookmarksClientAPI contains the set of methods on the BookmarksClient type.
type BookmarksClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, bookmarkID string, bookmark securityinsight.Bookmark) (result securityinsight.Bookmark, err error)
@@ -82,6 +105,7 @@ var _ DataConnectorsClientAPI = (*securityinsight.DataConnectorsClient)(nil)
// EntitiesClientAPI contains the set of methods on the EntitiesClient type.
type EntitiesClientAPI interface {
+ Expand(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, entityID string, parameters securityinsight.EntityExpandParameters) (result securityinsight.EntityExpandResponse, err error)
Get(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string, entityID string) (result securityinsight.EntityModel, err error)
List(ctx context.Context, resourceGroupName string, operationalInsightsResourceProvider string, workspaceName string) (result securityinsight.EntityListPage, err error)
}
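
The interface changes above include a breaking reshape of CasesClient.List, which now takes OData-style filter, orderby, top, and skipToken arguments. The following is a minimal sketch, under assumed but standard track-1 client construction (NewCasesClient, the embedded Authorizer field, and auth.NewAuthorizerFromEnvironment); the subscription, resource group, workspace, and resource provider values are placeholders.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/securityinsight/mgmt/2017-08-01-preview/securityinsight"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}

	client := securityinsight.NewCasesClient("<subscription-id>")
	client.Authorizer = authorizer

	top := int32(10)
	page, err := client.List(context.Background(),
		"<resource-group>",               // resourceGroupName
		"Microsoft.OperationalInsights",  // operationalInsightsResourceProvider (placeholder)
		"<workspace>",                    // workspaceName
		"properties/status eq 'New'",     // filter
		"properties/createdTimeUtc desc", // orderby
		&top,                             // top
		"")                               // skipToken
	if err != nil {
		panic(err)
	}
	// Walk the first page only; CaseListPage also supports NotDone/NextWithContext.
	for _, c := range page.Values() {
		if c.Name != nil {
			fmt.Println(*c.Name)
		}
	}
}
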
diff --git a/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/applications.go b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/applications.go
new file mode 100644
index 000000000000..6fa4e1a8c9a5
--- /dev/null
+++ b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/applications.go
@@ -0,0 +1,441 @@
+package servicefabric
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ApplicationsClient is the Service Fabric Management Client.
+type ApplicationsClient struct {
+ BaseClient
+}
+
+// NewApplicationsClient creates an instance of the ApplicationsClient client.
+func NewApplicationsClient(subscriptionID string) ApplicationsClient {
+ return NewApplicationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewApplicationsClientWithBaseURI creates an instance of the ApplicationsClient client.
+func NewApplicationsClientWithBaseURI(baseURI string, subscriptionID string) ApplicationsClient {
+ return ApplicationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create create or update a Service Fabric application resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationName - the name of the application resource.
+// parameters - the application resource.
+func (client ApplicationsClient) Create(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters ApplicationResource) (result ApplicationsCreateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationsClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreatePreparer(ctx, resourceGroupName, clusterName, applicationName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "Create", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client ApplicationsClient) CreatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters ApplicationResource) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationName": autorest.Encode("path", applicationName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationsClient) CreateSender(req *http.Request) (future ApplicationsCreateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client ApplicationsClient) CreateResponder(resp *http.Response) (result ApplicationResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete delete a Service Fabric application resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationName - the name of the application resource.
+func (client ApplicationsClient) Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (result ApplicationsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, clusterName, applicationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ApplicationsClient) DeletePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationName": autorest.Encode("path", applicationName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationsClient) DeleteSender(req *http.Request) (future ApplicationsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ApplicationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get get a Service Fabric application resource created or in the process of being created in the Service Fabric
+// cluster resource.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationName - the name of the application resource.
+func (client ApplicationsClient) Get(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (result ApplicationResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, clusterName, applicationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ApplicationsClient) GetPreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationName": autorest.Encode("path", applicationName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ApplicationsClient) GetResponder(resp *http.Response) (result ApplicationResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets all application resources created or in the process of being created in the Service Fabric cluster
+// resource.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+func (client ApplicationsClient) List(ctx context.Context, resourceGroupName string, clusterName string) (result ApplicationResourceList, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListPreparer(ctx, resourceGroupName, clusterName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ApplicationsClient) ListPreparer(ctx context.Context, resourceGroupName string, clusterName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ApplicationsClient) ListResponder(resp *http.Response) (result ApplicationResourceList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Update update a Service Fabric application resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationName - the name of the application resource.
+// parameters - the application resource for patch operations.
+func (client ApplicationsClient) Update(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters ApplicationResourceUpdate) (result ApplicationsUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationsClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, clusterName, applicationName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.UpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "Update", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client ApplicationsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters ApplicationResourceUpdate) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationName": autorest.Encode("path", applicationName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationsClient) UpdateSender(req *http.Request) (future ApplicationsUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client ApplicationsClient) UpdateResponder(resp *http.Response) (result ApplicationResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
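
Create, Delete, and Update on ApplicationsClient are long-running operations and return futures (ApplicationsCreateFuture and friends) rather than the final resource. A minimal sketch of driving Create to completion follows, assuming the standard go-autorest future helpers (WaitForCompletionRef and the generated Result method) and a client that already carries an authorizer; the package name and resource names are placeholders.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
)

// createApplication starts the Create operation and blocks until the service
// reports it finished, then reads the final ApplicationResource.
func createApplication(ctx context.Context, client servicefabric.ApplicationsClient, params servicefabric.ApplicationResource) (servicefabric.ApplicationResource, error) {
	future, err := client.Create(ctx, "<resource-group>", "<cluster>", "<application>", params)
	if err != nil {
		return servicefabric.ApplicationResource{}, err
	}
	// Poll the long-running operation; client.Client is the embedded autorest.Client.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return servicefabric.ApplicationResource{}, err
	}
	// Result invokes the generated CreateResponder on the final response.
	return future.Result(client)
}
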
diff --git a/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/applicationtypes.go b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/applicationtypes.go
new file mode 100644
index 000000000000..6e7d97a9a721
--- /dev/null
+++ b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/applicationtypes.go
@@ -0,0 +1,359 @@
+package servicefabric
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ApplicationTypesClient is the Service Fabric Management Client.
+type ApplicationTypesClient struct {
+ BaseClient
+}
+
+// NewApplicationTypesClient creates an instance of the ApplicationTypesClient client.
+func NewApplicationTypesClient(subscriptionID string) ApplicationTypesClient {
+ return NewApplicationTypesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewApplicationTypesClientWithBaseURI creates an instance of the ApplicationTypesClient client.
+func NewApplicationTypesClientWithBaseURI(baseURI string, subscriptionID string) ApplicationTypesClient {
+ return ApplicationTypesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create create or update a Service Fabric application type name resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationTypeName - the name of the application type name resource.
+// parameters - the application type name resource.
+func (client ApplicationTypesClient) Create(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, parameters ApplicationTypeResource) (result ApplicationTypeResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypesClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreatePreparer(ctx, resourceGroupName, clusterName, applicationTypeName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "Create", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client ApplicationTypesClient) CreatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, parameters ApplicationTypeResource) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationTypeName": autorest.Encode("path", applicationTypeName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationTypesClient) CreateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client ApplicationTypesClient) CreateResponder(resp *http.Response) (result ApplicationTypeResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete delete a Service Fabric application type name resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationTypeName - the name of the application type name resource.
+func (client ApplicationTypesClient) Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (result ApplicationTypesDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, clusterName, applicationTypeName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ApplicationTypesClient) DeletePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationTypeName": autorest.Encode("path", applicationTypeName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationTypesClient) DeleteSender(req *http.Request) (future ApplicationTypesDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ApplicationTypesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get get a Service Fabric application type name resource created or in the process of being created in the Service
+// Fabric cluster resource.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationTypeName - the name of the application type name resource.
+func (client ApplicationTypesClient) Get(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (result ApplicationTypeResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, clusterName, applicationTypeName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ApplicationTypesClient) GetPreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationTypeName": autorest.Encode("path", applicationTypeName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationTypesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ApplicationTypesClient) GetResponder(resp *http.Response) (result ApplicationTypeResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets all application type name resources created or in the process of being created in the Service Fabric
+// cluster resource.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+func (client ApplicationTypesClient) List(ctx context.Context, resourceGroupName string, clusterName string) (result ApplicationTypeResourceList, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListPreparer(ctx, resourceGroupName, clusterName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ApplicationTypesClient) ListPreparer(ctx context.Context, resourceGroupName string, clusterName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationTypesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ApplicationTypesClient) ListResponder(resp *http.Response) (result ApplicationTypeResourceList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
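
In contrast to ApplicationsClient above, ApplicationTypesClient.Create is synchronous and returns the ApplicationTypeResource directly; only Delete is long-running and returns ApplicationTypesDeleteFuture. A minimal sketch under placeholder names, again assuming the standard future helpers and a pre-configured client:

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
)

// registerAndRemoveApplicationType registers an application type name
// (synchronous) and then deletes it again, waiting on the Delete future.
func registerAndRemoveApplicationType(ctx context.Context, client servicefabric.ApplicationTypesClient) error {
	// Create returns the resource directly; there is no future to poll.
	if _, err := client.Create(ctx, "<resource-group>", "<cluster>", "<app-type>", servicefabric.ApplicationTypeResource{}); err != nil {
		return err
	}
	// Delete is long-running and returns ApplicationTypesDeleteFuture.
	future, err := client.Delete(ctx, "<resource-group>", "<cluster>", "<app-type>")
	if err != nil {
		return err
	}
	return future.WaitForCompletionRef(ctx, client.Client)
}
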
diff --git a/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/applicationtypeversions.go b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/applicationtypeversions.go
new file mode 100644
index 000000000000..b9a1c3476dc9
--- /dev/null
+++ b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/applicationtypeversions.go
@@ -0,0 +1,375 @@
+package servicefabric
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ApplicationTypeVersionsClient is the Service Fabric Management Client.
+type ApplicationTypeVersionsClient struct {
+ BaseClient
+}
+
+// NewApplicationTypeVersionsClient creates an instance of the ApplicationTypeVersionsClient client.
+func NewApplicationTypeVersionsClient(subscriptionID string) ApplicationTypeVersionsClient {
+ return NewApplicationTypeVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewApplicationTypeVersionsClientWithBaseURI creates an instance of the ApplicationTypeVersionsClient client.
+func NewApplicationTypeVersionsClientWithBaseURI(baseURI string, subscriptionID string) ApplicationTypeVersionsClient {
+ return ApplicationTypeVersionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create create or update a Service Fabric application type version resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationTypeName - the name of the application type name resource.
+// version - the application type version.
+// parameters - the application type version resource.
+func (client ApplicationTypeVersionsClient) Create(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, parameters ApplicationTypeVersionResource) (result ApplicationTypeVersionsCreateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypeVersionsClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.ApplicationTypeVersionResourceProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ApplicationTypeVersionResourceProperties.AppPackageURL", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("servicefabric.ApplicationTypeVersionsClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, clusterName, applicationTypeName, version, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "Create", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client ApplicationTypeVersionsClient) CreatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, parameters ApplicationTypeVersionResource) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationTypeName": autorest.Encode("path", applicationTypeName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "version": autorest.Encode("path", version),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}/versions/{version}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationTypeVersionsClient) CreateSender(req *http.Request) (future ApplicationTypeVersionsCreateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client ApplicationTypeVersionsClient) CreateResponder(resp *http.Response) (result ApplicationTypeVersionResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete delete a Service Fabric application type version resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationTypeName - the name of the application type name resource.
+// version - the application type version.
+func (client ApplicationTypeVersionsClient) Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string) (result ApplicationTypeVersionsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypeVersionsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, clusterName, applicationTypeName, version)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ApplicationTypeVersionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationTypeName": autorest.Encode("path", applicationTypeName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "version": autorest.Encode("path", version),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}/versions/{version}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationTypeVersionsClient) DeleteSender(req *http.Request) (future ApplicationTypeVersionsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ApplicationTypeVersionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets a Service Fabric application type version resource created or in the process of being created in the Service
+// Fabric application type name resource.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationTypeName - the name of the application type name resource.
+// version - the application type version.
+func (client ApplicationTypeVersionsClient) Get(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string) (result ApplicationTypeVersionResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypeVersionsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, clusterName, applicationTypeName, version)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ApplicationTypeVersionsClient) GetPreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationTypeName": autorest.Encode("path", applicationTypeName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "version": autorest.Encode("path", version),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}/versions/{version}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationTypeVersionsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ApplicationTypeVersionsClient) GetResponder(resp *http.Response) (result ApplicationTypeVersionResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets all application type version resources created or in the process of being created in the Service Fabric
+// application type name resource.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationTypeName - the name of the application type name resource.
+func (client ApplicationTypeVersionsClient) List(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (result ApplicationTypeVersionResourceList, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypeVersionsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListPreparer(ctx, resourceGroupName, clusterName, applicationTypeName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ApplicationTypeVersionsClient) ListPreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationTypeName": autorest.Encode("path", applicationTypeName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}/versions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationTypeVersionsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ApplicationTypeVersionsClient) ListResponder(resp *http.Response) (result ApplicationTypeVersionResourceList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
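Create above returns an ApplicationTypeVersionsCreateFuture for the long-running PUT. A minimal caller sketch, assuming the NewApplicationTypeVersionsClient constructor and the future's Result method that AutoRest generates alongside this client; the subscription, resource names, and package URL are placeholders, and credentials are read from the environment:

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	ctx := context.Background()
	client := servicefabric.NewApplicationTypeVersionsClient("<subscription id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// AppPackageURL is the only required property, as enforced by the
	// validation constraint at the top of Create.
	future, err := client.Create(ctx, "<resource group>", "<cluster>", "<app type>", "1.0.0",
		servicefabric.ApplicationTypeVersionResource{
			ApplicationTypeVersionResourceProperties: &servicefabric.ApplicationTypeVersionResourceProperties{
				AppPackageURL: to.StringPtr("https://example.invalid/app.sfpkg"),
			},
		})
	if err != nil {
		log.Fatal(err)
	}
	// Block until the service finishes provisioning, then fetch the final resource.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	if _, err := future.Result(client); err != nil {
		log.Fatal(err)
	}
}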
diff --git a/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/client.go b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/client.go
new file mode 100644
index 000000000000..0563316480f8
--- /dev/null
+++ b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/client.go
@@ -0,0 +1,51 @@
+// Package servicefabric implements the Azure ARM Servicefabric service API version 2019-03-01-preview.
+//
+// Service Fabric Management Client
+package servicefabric
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Servicefabric
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Servicefabric.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
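A short sketch of the two constructors above (the subscription ID is a placeholder): New targets the public ARM endpoint baked into DefaultBaseURI, while NewWithBaseURI points the same client shape at another endpoint, here using the environment constants from go-autorest's azure package.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Default constructor: https://management.azure.com
	public := servicefabric.New("00000000-0000-0000-0000-000000000000")
	// Same client, different ARM endpoint (Azure US Government in this example).
	gov := servicefabric.NewWithBaseURI(azure.USGovernmentCloud.ResourceManagerEndpoint, "00000000-0000-0000-0000-000000000000")
	fmt.Println(public.BaseURI, gov.BaseURI)
}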
diff --git a/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/clusters.go b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/clusters.go
new file mode 100644
index 000000000000..82e30df9d6d1
--- /dev/null
+++ b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/clusters.go
@@ -0,0 +1,557 @@
+package servicefabric
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ClustersClient is the Service Fabric Management Client.
+type ClustersClient struct {
+ BaseClient
+}
+
+// NewClustersClient creates an instance of the ClustersClient client.
+func NewClustersClient(subscriptionID string) ClustersClient {
+ return NewClustersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewClustersClientWithBaseURI creates an instance of the ClustersClient client.
+func NewClustersClientWithBaseURI(baseURI string, subscriptionID string) ClustersClient {
+ return ClustersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create creates or updates a Service Fabric cluster resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// parameters - the cluster resource.
+func (client ClustersClient) Create(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster) (result ClustersCreateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ClustersClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.ClusterProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.Certificate", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.Certificate.Thumbprint", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.ClusterProperties.DiagnosticsStorageAccountConfig", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.DiagnosticsStorageAccountConfig.StorageAccountName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.DiagnosticsStorageAccountConfig.ProtectedAccountKeyName", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.DiagnosticsStorageAccountConfig.BlobEndpoint", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.DiagnosticsStorageAccountConfig.QueueEndpoint", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.DiagnosticsStorageAccountConfig.TableEndpoint", Name: validation.Null, Rule: true, Chain: nil},
+ }},
+ {Target: "parameters.ClusterProperties.ManagementEndpoint", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.NodeTypes", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.ReverseProxyCertificate", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.ReverseProxyCertificate.Thumbprint", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.ClusterProperties.UpgradeDescription", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.UpgradeDescription.UpgradeReplicaSetCheckTimeout", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.HealthCheckWaitDuration", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.HealthCheckStableDuration", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.HealthCheckRetryTimeout", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.UpgradeTimeout", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.UpgradeDomainTimeout", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.HealthPolicy", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.UpgradeDescription.HealthPolicy.MaxPercentUnhealthyNodes", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.UpgradeDescription.HealthPolicy.MaxPercentUnhealthyNodes", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.HealthPolicy.MaxPercentUnhealthyNodes", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil},
+ }},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.HealthPolicy.MaxPercentUnhealthyApplications", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.UpgradeDescription.HealthPolicy.MaxPercentUnhealthyApplications", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.HealthPolicy.MaxPercentUnhealthyApplications", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil},
+ }},
+ }},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.DeltaHealthPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.UpgradeDescription.DeltaHealthPolicy.MaxPercentDeltaUnhealthyNodes", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.UpgradeDescription.DeltaHealthPolicy.MaxPercentDeltaUnhealthyNodes", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.DeltaHealthPolicy.MaxPercentDeltaUnhealthyNodes", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil},
+ }},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.DeltaHealthPolicy.MaxPercentUpgradeDomainDeltaUnhealthyNodes", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.UpgradeDescription.DeltaHealthPolicy.MaxPercentUpgradeDomainDeltaUnhealthyNodes", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.DeltaHealthPolicy.MaxPercentUpgradeDomainDeltaUnhealthyNodes", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil},
+ }},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.DeltaHealthPolicy.MaxPercentDeltaUnhealthyApplications", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.ClusterProperties.UpgradeDescription.DeltaHealthPolicy.MaxPercentDeltaUnhealthyApplications", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
+ {Target: "parameters.ClusterProperties.UpgradeDescription.DeltaHealthPolicy.MaxPercentDeltaUnhealthyApplications", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil},
+ }},
+ }},
+ }},
+ }}}}}); err != nil {
+ return result, validation.NewError("servicefabric.ClustersClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, clusterName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Create", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client ClustersClient) CreatePreparer(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client ClustersClient) CreateSender(req *http.Request) (future ClustersCreateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client ClustersClient) CreateResponder(resp *http.Response) (result Cluster, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes a Service Fabric cluster resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+func (client ClustersClient) Delete(ctx context.Context, resourceGroupName string, clusterName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ClustersClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, clusterName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ClustersClient) DeletePreparer(ctx context.Context, resourceGroupName string, clusterName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ClustersClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ClustersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets a Service Fabric cluster resource created or in the process of being created in the specified resource
+// group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+func (client ClustersClient) Get(ctx context.Context, resourceGroupName string, clusterName string) (result Cluster, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ClustersClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, clusterName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ClustersClient) GetPreparer(ctx context.Context, resourceGroupName string, clusterName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ClustersClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ClustersClient) GetResponder(resp *http.Response) (result Cluster, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets all Service Fabric cluster resources created or in the process of being created in the subscription.
+func (client ClustersClient) List(ctx context.Context) (result ClusterListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ClustersClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ClustersClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/clusters", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ClustersClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ClustersClient) ListResponder(resp *http.Response) (result ClusterListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByResourceGroup gets all Service Fabric cluster resources created or in the process of being created in the
+// resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+func (client ClustersClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ClusterListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ClustersClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client ClustersClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client ClustersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client ClustersClient) ListByResourceGroupResponder(resp *http.Response) (result ClusterListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Update updates the configuration of a Service Fabric cluster resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// parameters - the parameters that contain the property value and property name used to update the
+// cluster configuration.
+func (client ClustersClient) Update(ctx context.Context, resourceGroupName string, clusterName string, parameters ClusterUpdateParameters) (result ClustersUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ClustersClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, clusterName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.UpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Update", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client ClustersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, clusterName string, parameters ClusterUpdateParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client ClustersClient) UpdateSender(req *http.Request) (future ClustersUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client ClustersClient) UpdateResponder(resp *http.Response) (result Cluster, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
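A hedged usage sketch for ClustersClient, tying together the Get and List operations defined above; the resource names are placeholders, and auth.NewAuthorizerFromEnvironment is just one way to supply credentials. Note that this generated List returns a ClusterListResult directly rather than a pager.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	ctx := context.Background()
	client := servicefabric.NewClustersClient("<subscription id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// Read a single cluster resource.
	cluster, err := client.Get(ctx, "<resource group>", "<cluster name>")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cluster.Response.Response.StatusCode)

	// Enumerate every cluster in the subscription.
	if _, err := client.List(ctx); err != nil {
		log.Fatal(err)
	}
}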
diff --git a/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/clusterversions.go b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/clusterversions.go
new file mode 100644
index 000000000000..4bf841f027bc
--- /dev/null
+++ b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/clusterversions.go
@@ -0,0 +1,349 @@
+package servicefabric
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ClusterVersionsClient is the Service Fabric Management Client.
+type ClusterVersionsClient struct {
+ BaseClient
+}
+
+// NewClusterVersionsClient creates an instance of the ClusterVersionsClient client.
+func NewClusterVersionsClient(subscriptionID string) ClusterVersionsClient {
+ return NewClusterVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewClusterVersionsClientWithBaseURI creates an instance of the ClusterVersionsClient client.
+func NewClusterVersionsClientWithBaseURI(baseURI string, subscriptionID string) ClusterVersionsClient {
+ return ClusterVersionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Get gets information about an available Service Fabric cluster code version.
+// Parameters:
+// location - the location for the cluster code versions. This is different from cluster location.
+// clusterVersion - the cluster code version.
+func (client ClusterVersionsClient) Get(ctx context.Context, location string, clusterVersion string) (result ClusterCodeVersionsListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ClusterVersionsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, location, clusterVersion)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ClusterVersionsClient) GetPreparer(ctx context.Context, location string, clusterVersion string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "clusterVersion": autorest.Encode("path", clusterVersion),
+ "location": autorest.Encode("path", location),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/clusterVersions/{clusterVersion}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ClusterVersionsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ClusterVersionsClient) GetResponder(resp *http.Response) (result ClusterCodeVersionsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetByEnvironment gets information about an available Service Fabric cluster code version by environment.
+// Parameters:
+// location - the location for the cluster code versions. This is different from cluster location.
+// environment - the operating system of the cluster. The default value covers all operating systems.
+// clusterVersion - the cluster code version.
+func (client ClusterVersionsClient) GetByEnvironment(ctx context.Context, location string, environment string, clusterVersion string) (result ClusterCodeVersionsListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ClusterVersionsClient.GetByEnvironment")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetByEnvironmentPreparer(ctx, location, environment, clusterVersion)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "GetByEnvironment", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetByEnvironmentSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "GetByEnvironment", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetByEnvironmentResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "GetByEnvironment", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetByEnvironmentPreparer prepares the GetByEnvironment request.
+func (client ClusterVersionsClient) GetByEnvironmentPreparer(ctx context.Context, location string, environment string, clusterVersion string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "clusterVersion": autorest.Encode("path", clusterVersion),
+ "environment": autorest.Encode("path", environment),
+ "location": autorest.Encode("path", location),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/environments/{environment}/clusterVersions/{clusterVersion}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetByEnvironmentSender sends the GetByEnvironment request. The method will close the
+// http.Response Body if it receives an error.
+func (client ClusterVersionsClient) GetByEnvironmentSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetByEnvironmentResponder handles the response to the GetByEnvironment request. The method always
+// closes the http.Response Body.
+func (client ClusterVersionsClient) GetByEnvironmentResponder(resp *http.Response) (result ClusterCodeVersionsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets all available code versions for Service Fabric cluster resources by location.
+// Parameters:
+// location - the location for the cluster code versions. This is different from cluster location.
+func (client ClusterVersionsClient) List(ctx context.Context, location string) (result ClusterCodeVersionsListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ClusterVersionsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListPreparer(ctx, location)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ClusterVersionsClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "location": autorest.Encode("path", location),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/clusterVersions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ClusterVersionsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ClusterVersionsClient) ListResponder(resp *http.Response) (result ClusterCodeVersionsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByEnvironment gets all available code versions for Service Fabric cluster resources by environment.
+// Parameters:
+// location - the location for the cluster code versions. This is different from cluster location.
+// environment - the operating system of the cluster. The default value covers all operating systems.
+func (client ClusterVersionsClient) ListByEnvironment(ctx context.Context, location string, environment string) (result ClusterCodeVersionsListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ClusterVersionsClient.ListByEnvironment")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListByEnvironmentPreparer(ctx, location, environment)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "ListByEnvironment", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByEnvironmentSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "ListByEnvironment", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByEnvironmentResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClusterVersionsClient", "ListByEnvironment", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByEnvironmentPreparer prepares the ListByEnvironment request.
+func (client ClusterVersionsClient) ListByEnvironmentPreparer(ctx context.Context, location string, environment string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "environment": autorest.Encode("path", environment),
+ "location": autorest.Encode("path", location),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/environments/{environment}/clusterVersions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByEnvironmentSender sends the ListByEnvironment request. The method will close the
+// http.Response Body if it receives an error.
+func (client ClusterVersionsClient) ListByEnvironmentSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByEnvironmentResponder handles the response to the ListByEnvironment request. The method always
+// closes the http.Response Body.
+func (client ClusterVersionsClient) ListByEnvironmentResponder(resp *http.Response) (result ClusterCodeVersionsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
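A sketch of the ClusterVersionsClient list operations above (the location value and subscription ID are placeholders); the environment filter is passed as a string that matches the Environment enum defined in models.go.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	ctx := context.Background()
	client := servicefabric.NewClusterVersionsClient("<subscription id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// All cluster code versions available in the region.
	if _, err := client.List(ctx, "eastus"); err != nil {
		log.Fatal(err)
	}
	// Only the versions that apply to Windows clusters.
	if _, err := client.ListByEnvironment(ctx, "eastus", string(servicefabric.Windows)); err != nil {
		log.Fatal(err)
	}
}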
diff --git a/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/models.go b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/models.go
new file mode 100644
index 000000000000..b4a34e62c15a
--- /dev/null
+++ b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/models.go
@@ -0,0 +1,4239 @@
+package servicefabric
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
+
+// ArmServicePackageActivationMode enumerates the values for arm service package activation mode.
+type ArmServicePackageActivationMode string
+
+const (
+ // ExclusiveProcess Indicates the application package activation mode will use exclusive process.
+ ExclusiveProcess ArmServicePackageActivationMode = "ExclusiveProcess"
+ // SharedProcess Indicates the application package activation mode will use shared process.
+ SharedProcess ArmServicePackageActivationMode = "SharedProcess"
+)
+
+// PossibleArmServicePackageActivationModeValues returns an array of possible values for the ArmServicePackageActivationMode const type.
+func PossibleArmServicePackageActivationModeValues() []ArmServicePackageActivationMode {
+ return []ArmServicePackageActivationMode{ExclusiveProcess, SharedProcess}
+}
+
+// ArmUpgradeFailureAction enumerates the values for arm upgrade failure action.
+type ArmUpgradeFailureAction string
+
+const (
+ // Manual Indicates that a manual repair will need to be performed by the administrator if the upgrade
+ // fails. Service Fabric will not proceed to the next upgrade domain automatically.
+ Manual ArmUpgradeFailureAction = "Manual"
+ // Rollback Indicates that a rollback of the upgrade will be performed by Service Fabric if the upgrade
+ // fails.
+ Rollback ArmUpgradeFailureAction = "Rollback"
+)
+
+// PossibleArmUpgradeFailureActionValues returns an array of possible values for the ArmUpgradeFailureAction const type.
+func PossibleArmUpgradeFailureActionValues() []ArmUpgradeFailureAction {
+ return []ArmUpgradeFailureAction{Manual, Rollback}
+}
+
+// ClusterState enumerates the values for cluster state.
+type ClusterState string
+
+const (
+ // AutoScale ...
+ AutoScale ClusterState = "AutoScale"
+ // BaselineUpgrade ...
+ BaselineUpgrade ClusterState = "BaselineUpgrade"
+ // Deploying ...
+ Deploying ClusterState = "Deploying"
+ // EnforcingClusterVersion ...
+ EnforcingClusterVersion ClusterState = "EnforcingClusterVersion"
+ // Ready ...
+ Ready ClusterState = "Ready"
+ // UpdatingInfrastructure ...
+ UpdatingInfrastructure ClusterState = "UpdatingInfrastructure"
+ // UpdatingUserCertificate ...
+ UpdatingUserCertificate ClusterState = "UpdatingUserCertificate"
+ // UpdatingUserConfiguration ...
+ UpdatingUserConfiguration ClusterState = "UpdatingUserConfiguration"
+ // UpgradeServiceUnreachable ...
+ UpgradeServiceUnreachable ClusterState = "UpgradeServiceUnreachable"
+ // WaitingForNodes ...
+ WaitingForNodes ClusterState = "WaitingForNodes"
+)
+
+// PossibleClusterStateValues returns an array of possible values for the ClusterState const type.
+func PossibleClusterStateValues() []ClusterState {
+ return []ClusterState{AutoScale, BaselineUpgrade, Deploying, EnforcingClusterVersion, Ready, UpdatingInfrastructure, UpdatingUserCertificate, UpdatingUserConfiguration, UpgradeServiceUnreachable, WaitingForNodes}
+}
+
+// DurabilityLevel enumerates the values for durability level.
+type DurabilityLevel string
+
+const (
+ // Bronze ...
+ Bronze DurabilityLevel = "Bronze"
+ // Gold ...
+ Gold DurabilityLevel = "Gold"
+ // Silver ...
+ Silver DurabilityLevel = "Silver"
+)
+
+// PossibleDurabilityLevelValues returns an array of possible values for the DurabilityLevel const type.
+func PossibleDurabilityLevelValues() []DurabilityLevel {
+ return []DurabilityLevel{Bronze, Gold, Silver}
+}
+
+// Environment enumerates the values for environment.
+type Environment string
+
+const (
+ // Linux ...
+ Linux Environment = "Linux"
+ // Windows ...
+ Windows Environment = "Windows"
+)
+
+// PossibleEnvironmentValues returns an array of possible values for the Environment const type.
+func PossibleEnvironmentValues() []Environment {
+ return []Environment{Linux, Windows}
+}
+
+// MoveCost enumerates the values for move cost.
+type MoveCost string
+
+const (
+ // High Specifies the move cost of the service as High. The value is 3.
+ High MoveCost = "High"
+ // Low Specifies the move cost of the service as Low. The value is 1.
+ Low MoveCost = "Low"
+ // Medium Specifies the move cost of the service as Medium. The value is 2.
+ Medium MoveCost = "Medium"
+ // Zero Zero move cost. This value is zero.
+ Zero MoveCost = "Zero"
+)
+
+// PossibleMoveCostValues returns an array of possible values for the MoveCost const type.
+func PossibleMoveCostValues() []MoveCost {
+ return []MoveCost{High, Low, Medium, Zero}
+}
+
+// PartitionScheme enumerates the values for partition scheme.
+type PartitionScheme string
+
+const (
+ // Invalid Indicates the partition kind is invalid. All Service Fabric enumerations have the invalid type.
+ // The value is zero.
+ Invalid PartitionScheme = "Invalid"
+ // Named Indicates that the partition is based on string names, and is a NamedPartitionSchemeDescription
+ // object. The value is 3.
+ Named PartitionScheme = "Named"
+ // Singleton Indicates that the service has a single partition, and is a
+ // SingletonPartitionSchemeDescription object. The value is 1.
+ Singleton PartitionScheme = "Singleton"
+ // UniformInt64Range Indicates that the partition is based on Int64 key ranges, and is a
+ // UniformInt64RangePartitionSchemeDescription object. The value is 2.
+ UniformInt64Range PartitionScheme = "UniformInt64Range"
+)
+
+// PossiblePartitionSchemeValues returns an array of possible values for the PartitionScheme const type.
+func PossiblePartitionSchemeValues() []PartitionScheme {
+ return []PartitionScheme{Invalid, Named, Singleton, UniformInt64Range}
+}
+
+// PartitionSchemeBasicPartitionSchemeDescription enumerates the values for partition scheme basic partition
+// scheme description.
+type PartitionSchemeBasicPartitionSchemeDescription string
+
+const (
+ // PartitionSchemeNamed ...
+ PartitionSchemeNamed PartitionSchemeBasicPartitionSchemeDescription = "Named"
+ // PartitionSchemePartitionSchemeDescription ...
+ PartitionSchemePartitionSchemeDescription PartitionSchemeBasicPartitionSchemeDescription = "PartitionSchemeDescription"
+ // PartitionSchemeSingleton ...
+ PartitionSchemeSingleton PartitionSchemeBasicPartitionSchemeDescription = "Singleton"
+ // PartitionSchemeUniformInt64Range ...
+ PartitionSchemeUniformInt64Range PartitionSchemeBasicPartitionSchemeDescription = "UniformInt64Range"
+)
+
+// PossiblePartitionSchemeBasicPartitionSchemeDescriptionValues returns an array of possible values for the PartitionSchemeBasicPartitionSchemeDescription const type.
+func PossiblePartitionSchemeBasicPartitionSchemeDescriptionValues() []PartitionSchemeBasicPartitionSchemeDescription {
+ return []PartitionSchemeBasicPartitionSchemeDescription{PartitionSchemeNamed, PartitionSchemePartitionSchemeDescription, PartitionSchemeSingleton, PartitionSchemeUniformInt64Range}
+}
+
+// ProvisioningState enumerates the values for provisioning state.
+type ProvisioningState string
+
+const (
+ // Canceled ...
+ Canceled ProvisioningState = "Canceled"
+ // Failed ...
+ Failed ProvisioningState = "Failed"
+ // Succeeded ...
+ Succeeded ProvisioningState = "Succeeded"
+ // Updating ...
+ Updating ProvisioningState = "Updating"
+)
+
+// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type.
+func PossibleProvisioningStateValues() []ProvisioningState {
+ return []ProvisioningState{Canceled, Failed, Succeeded, Updating}
+}
+
+// ReliabilityLevel enumerates the values for reliability level.
+type ReliabilityLevel string
+
+const (
+ // ReliabilityLevelBronze ...
+ ReliabilityLevelBronze ReliabilityLevel = "Bronze"
+ // ReliabilityLevelGold ...
+ ReliabilityLevelGold ReliabilityLevel = "Gold"
+ // ReliabilityLevelNone ...
+ ReliabilityLevelNone ReliabilityLevel = "None"
+ // ReliabilityLevelPlatinum ...
+ ReliabilityLevelPlatinum ReliabilityLevel = "Platinum"
+ // ReliabilityLevelSilver ...
+ ReliabilityLevelSilver ReliabilityLevel = "Silver"
+)
+
+// PossibleReliabilityLevelValues returns an array of possible values for the ReliabilityLevel const type.
+func PossibleReliabilityLevelValues() []ReliabilityLevel {
+ return []ReliabilityLevel{ReliabilityLevelBronze, ReliabilityLevelGold, ReliabilityLevelNone, ReliabilityLevelPlatinum, ReliabilityLevelSilver}
+}
+
+// ReliabilityLevel1 enumerates the values for reliability level 1.
+type ReliabilityLevel1 string
+
+const (
+ // ReliabilityLevel1Bronze ...
+ ReliabilityLevel1Bronze ReliabilityLevel1 = "Bronze"
+ // ReliabilityLevel1Gold ...
+ ReliabilityLevel1Gold ReliabilityLevel1 = "Gold"
+ // ReliabilityLevel1None ...
+ ReliabilityLevel1None ReliabilityLevel1 = "None"
+ // ReliabilityLevel1Platinum ...
+ ReliabilityLevel1Platinum ReliabilityLevel1 = "Platinum"
+ // ReliabilityLevel1Silver ...
+ ReliabilityLevel1Silver ReliabilityLevel1 = "Silver"
+)
+
+// PossibleReliabilityLevel1Values returns an array of possible values for the ReliabilityLevel1 const type.
+func PossibleReliabilityLevel1Values() []ReliabilityLevel1 {
+ return []ReliabilityLevel1{ReliabilityLevel1Bronze, ReliabilityLevel1Gold, ReliabilityLevel1None, ReliabilityLevel1Platinum, ReliabilityLevel1Silver}
+}
+
+// ServiceCorrelationScheme enumerates the values for service correlation scheme.
+type ServiceCorrelationScheme string
+
+const (
+ // ServiceCorrelationSchemeAffinity Indicates that this service has an affinity relationship with another
+ // service. Provided for backwards compatibility; consider preferring the Aligned or NonAlignedAffinity
+ // options. The value is 1.
+ ServiceCorrelationSchemeAffinity ServiceCorrelationScheme = "Affinity"
+ // ServiceCorrelationSchemeAlignedAffinity Aligned affinity ensures that the primaries of the partitions of
+ // the affinitized services are collocated on the same nodes. This is the default and is the same as
+ // selecting the Affinity scheme. The value is 2.
+ ServiceCorrelationSchemeAlignedAffinity ServiceCorrelationScheme = "AlignedAffinity"
+ // ServiceCorrelationSchemeInvalid An invalid correlation scheme. Cannot be used. The value is zero.
+ ServiceCorrelationSchemeInvalid ServiceCorrelationScheme = "Invalid"
+ // ServiceCorrelationSchemeNonAlignedAffinity Non-Aligned affinity guarantees that all replicas of each
+ // service will be placed on the same nodes. Unlike Aligned Affinity, this does not guarantee that replicas
+ // of a particular role will be collocated. The value is 3.
+ ServiceCorrelationSchemeNonAlignedAffinity ServiceCorrelationScheme = "NonAlignedAffinity"
+)
+
+// PossibleServiceCorrelationSchemeValues returns an array of possible values for the ServiceCorrelationScheme const type.
+func PossibleServiceCorrelationSchemeValues() []ServiceCorrelationScheme {
+ return []ServiceCorrelationScheme{ServiceCorrelationSchemeAffinity, ServiceCorrelationSchemeAlignedAffinity, ServiceCorrelationSchemeInvalid, ServiceCorrelationSchemeNonAlignedAffinity}
+}
+
+// ServiceKind enumerates the values for service kind.
+type ServiceKind string
+
+const (
+ // ServiceKindInvalid Indicates the service kind is invalid. All Service Fabric enumerations have the
+ // invalid type. The value is zero.
+ ServiceKindInvalid ServiceKind = "Invalid"
+ // ServiceKindStateful Uses Service Fabric to make its state or part of its state highly available and
+ // reliable. The value is 2.
+ ServiceKindStateful ServiceKind = "Stateful"
+ // ServiceKindStateless Does not use Service Fabric to make its state highly available or reliable. The
+ // value is 1.
+ ServiceKindStateless ServiceKind = "Stateless"
+)
+
+// PossibleServiceKindValues returns an array of possible values for the ServiceKind const type.
+func PossibleServiceKindValues() []ServiceKind {
+ return []ServiceKind{ServiceKindInvalid, ServiceKindStateful, ServiceKindStateless}
+}
+
+// ServiceKindBasicServiceResourceProperties enumerates the values for service kind basic service resource
+// properties.
+type ServiceKindBasicServiceResourceProperties string
+
+const (
+ // ServiceKindServiceResourceProperties ...
+ ServiceKindServiceResourceProperties ServiceKindBasicServiceResourceProperties = "ServiceResourceProperties"
+ // ServiceKindStateful1 ...
+ ServiceKindStateful1 ServiceKindBasicServiceResourceProperties = "Stateful"
+ // ServiceKindStateless1 ...
+ ServiceKindStateless1 ServiceKindBasicServiceResourceProperties = "Stateless"
+)
+
+// PossibleServiceKindBasicServiceResourcePropertiesValues returns an array of possible values for the ServiceKindBasicServiceResourceProperties const type.
+func PossibleServiceKindBasicServiceResourcePropertiesValues() []ServiceKindBasicServiceResourceProperties {
+ return []ServiceKindBasicServiceResourceProperties{ServiceKindServiceResourceProperties, ServiceKindStateful1, ServiceKindStateless1}
+}
+
+// ServiceKindBasicServiceResourceUpdateProperties enumerates the values for service kind basic service
+// resource update properties.
+type ServiceKindBasicServiceResourceUpdateProperties string
+
+const (
+ // ServiceKindBasicServiceResourceUpdatePropertiesServiceKindServiceResourceUpdateProperties ...
+ ServiceKindBasicServiceResourceUpdatePropertiesServiceKindServiceResourceUpdateProperties ServiceKindBasicServiceResourceUpdateProperties = "ServiceResourceUpdateProperties"
+ // ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateful ...
+ ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateful ServiceKindBasicServiceResourceUpdateProperties = "Stateful"
+ // ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateless ...
+ ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateless ServiceKindBasicServiceResourceUpdateProperties = "Stateless"
+)
+
+// PossibleServiceKindBasicServiceResourceUpdatePropertiesValues returns an array of possible values for the ServiceKindBasicServiceResourceUpdateProperties const type.
+func PossibleServiceKindBasicServiceResourceUpdatePropertiesValues() []ServiceKindBasicServiceResourceUpdateProperties {
+ return []ServiceKindBasicServiceResourceUpdateProperties{ServiceKindBasicServiceResourceUpdatePropertiesServiceKindServiceResourceUpdateProperties, ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateful, ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateless}
+}
+
+// ServiceLoadMetricWeight enumerates the values for service load metric weight.
+type ServiceLoadMetricWeight string
+
+const (
+ // ServiceLoadMetricWeightHigh Specifies the metric weight of the service load as High. The value is 3.
+ ServiceLoadMetricWeightHigh ServiceLoadMetricWeight = "High"
+ // ServiceLoadMetricWeightLow Specifies the metric weight of the service load as Low. The value is 1.
+ ServiceLoadMetricWeightLow ServiceLoadMetricWeight = "Low"
+ // ServiceLoadMetricWeightMedium Specifies the metric weight of the service load as Medium. The value is 2.
+ ServiceLoadMetricWeightMedium ServiceLoadMetricWeight = "Medium"
+ // ServiceLoadMetricWeightZero Disables resource balancing for this metric. This value is zero.
+ ServiceLoadMetricWeightZero ServiceLoadMetricWeight = "Zero"
+)
+
+// PossibleServiceLoadMetricWeightValues returns an array of possible values for the ServiceLoadMetricWeight const type.
+func PossibleServiceLoadMetricWeightValues() []ServiceLoadMetricWeight {
+ return []ServiceLoadMetricWeight{ServiceLoadMetricWeightHigh, ServiceLoadMetricWeightLow, ServiceLoadMetricWeightMedium, ServiceLoadMetricWeightZero}
+}
+
+// ServicePlacementPolicyType enumerates the values for service placement policy type.
+type ServicePlacementPolicyType string
+
+const (
+ // ServicePlacementPolicyTypeInvalid Indicates the type of the placement policy is invalid. All Service
+ // Fabric enumerations have the invalid type. The value is zero.
+ ServicePlacementPolicyTypeInvalid ServicePlacementPolicyType = "Invalid"
+ // ServicePlacementPolicyTypeInvalidDomain Indicates that the ServicePlacementPolicyDescription is of type
+ // ServicePlacementInvalidDomainPolicyDescription, which indicates that a particular fault or upgrade
+ // domain cannot be used for placement of this service. The value is 1.
+ ServicePlacementPolicyTypeInvalidDomain ServicePlacementPolicyType = "InvalidDomain"
+ // ServicePlacementPolicyTypeNonPartiallyPlaceService Indicates that the ServicePlacementPolicyDescription
+ // is of type ServicePlacementNonPartiallyPlaceServicePolicyDescription, which indicates that if possible
+ // all replicas of a particular partition of the service should be placed atomically. The value is 5.
+ ServicePlacementPolicyTypeNonPartiallyPlaceService ServicePlacementPolicyType = "NonPartiallyPlaceService"
+ // ServicePlacementPolicyTypePreferredPrimaryDomain Indicates that the ServicePlacementPolicyDescription is
+ // of type ServicePlacementPreferPrimaryDomainPolicyDescription, which indicates that if possible the
+ // Primary replica for the partitions of the service should be located in a particular domain as an
+ // optimization. The value is 3.
+ ServicePlacementPolicyTypePreferredPrimaryDomain ServicePlacementPolicyType = "PreferredPrimaryDomain"
+ // ServicePlacementPolicyTypeRequiredDomain Indicates that the ServicePlacementPolicyDescription is of type
+ // ServicePlacementRequiredDomainPolicyDescription, indicating that the replicas of the service
+ // must be placed in a specific domain. The value is 2.
+ ServicePlacementPolicyTypeRequiredDomain ServicePlacementPolicyType = "RequiredDomain"
+ // ServicePlacementPolicyTypeRequiredDomainDistribution Indicates that the
+ // ServicePlacementPolicyDescription is of type ServicePlacementRequireDomainDistributionPolicyDescription,
+ // indicating that the system will disallow placement of any two replicas from the same partition in the
+ // same domain at any time. The value is 4.
+ ServicePlacementPolicyTypeRequiredDomainDistribution ServicePlacementPolicyType = "RequiredDomainDistribution"
+)
+
+// PossibleServicePlacementPolicyTypeValues returns an array of possible values for the ServicePlacementPolicyType const type.
+func PossibleServicePlacementPolicyTypeValues() []ServicePlacementPolicyType {
+ return []ServicePlacementPolicyType{ServicePlacementPolicyTypeInvalid, ServicePlacementPolicyTypeInvalidDomain, ServicePlacementPolicyTypeNonPartiallyPlaceService, ServicePlacementPolicyTypePreferredPrimaryDomain, ServicePlacementPolicyTypeRequiredDomain, ServicePlacementPolicyTypeRequiredDomainDistribution}
+}
+
+// Type enumerates the values for type.
+type Type string
+
+const (
+ // TypeServicePlacementPolicyDescription ...
+ TypeServicePlacementPolicyDescription Type = "ServicePlacementPolicyDescription"
+)
+
+// PossibleTypeValues returns an array of possible values for the Type const type.
+func PossibleTypeValues() []Type {
+ return []Type{TypeServicePlacementPolicyDescription}
+}
+
+// UpgradeMode enumerates the values for upgrade mode.
+type UpgradeMode string
+
+const (
+ // UpgradeModeAutomatic ...
+ UpgradeModeAutomatic UpgradeMode = "Automatic"
+ // UpgradeModeManual ...
+ UpgradeModeManual UpgradeMode = "Manual"
+)
+
+// PossibleUpgradeModeValues returns an array of possible values for the UpgradeMode const type.
+func PossibleUpgradeModeValues() []UpgradeMode {
+ return []UpgradeMode{UpgradeModeAutomatic, UpgradeModeManual}
+}
+
+// UpgradeMode1 enumerates the values for upgrade mode 1.
+type UpgradeMode1 string
+
+const (
+ // UpgradeMode1Automatic ...
+ UpgradeMode1Automatic UpgradeMode1 = "Automatic"
+ // UpgradeMode1Manual ...
+ UpgradeMode1Manual UpgradeMode1 = "Manual"
+)
+
+// PossibleUpgradeMode1Values returns an array of possible values for the UpgradeMode1 const type.
+func PossibleUpgradeMode1Values() []UpgradeMode1 {
+ return []UpgradeMode1{UpgradeMode1Automatic, UpgradeMode1Manual}
+}
+
+// X509StoreName enumerates the values for x509 store name.
+type X509StoreName string
+
+const (
+ // AddressBook ...
+ AddressBook X509StoreName = "AddressBook"
+ // AuthRoot ...
+ AuthRoot X509StoreName = "AuthRoot"
+ // CertificateAuthority ...
+ CertificateAuthority X509StoreName = "CertificateAuthority"
+ // Disallowed ...
+ Disallowed X509StoreName = "Disallowed"
+ // My ...
+ My X509StoreName = "My"
+ // Root ...
+ Root X509StoreName = "Root"
+ // TrustedPeople ...
+ TrustedPeople X509StoreName = "TrustedPeople"
+ // TrustedPublisher ...
+ TrustedPublisher X509StoreName = "TrustedPublisher"
+)
+
+// PossibleX509StoreNameValues returns an array of possible values for the X509StoreName const type.
+func PossibleX509StoreNameValues() []X509StoreName {
+ return []X509StoreName{AddressBook, AuthRoot, CertificateAuthority, Disallowed, My, Root, TrustedPeople, TrustedPublisher}
+}
+
+// X509StoreName1 enumerates the values for x509 store name 1.
+type X509StoreName1 string
+
+const (
+ // X509StoreName1AddressBook ...
+ X509StoreName1AddressBook X509StoreName1 = "AddressBook"
+ // X509StoreName1AuthRoot ...
+ X509StoreName1AuthRoot X509StoreName1 = "AuthRoot"
+ // X509StoreName1CertificateAuthority ...
+ X509StoreName1CertificateAuthority X509StoreName1 = "CertificateAuthority"
+ // X509StoreName1Disallowed ...
+ X509StoreName1Disallowed X509StoreName1 = "Disallowed"
+ // X509StoreName1My ...
+ X509StoreName1My X509StoreName1 = "My"
+ // X509StoreName1Root ...
+ X509StoreName1Root X509StoreName1 = "Root"
+ // X509StoreName1TrustedPeople ...
+ X509StoreName1TrustedPeople X509StoreName1 = "TrustedPeople"
+ // X509StoreName1TrustedPublisher ...
+ X509StoreName1TrustedPublisher X509StoreName1 = "TrustedPublisher"
+)
+
+// PossibleX509StoreName1Values returns an array of possible values for the X509StoreName1 const type.
+func PossibleX509StoreName1Values() []X509StoreName1 {
+ return []X509StoreName1{X509StoreName1AddressBook, X509StoreName1AuthRoot, X509StoreName1CertificateAuthority, X509StoreName1Disallowed, X509StoreName1My, X509StoreName1Root, X509StoreName1TrustedPeople, X509StoreName1TrustedPublisher}
+}
+
+// ApplicationDeltaHealthPolicy defines a delta health policy used to evaluate the health of an application
+// or one of its child entities when upgrading the cluster.
+type ApplicationDeltaHealthPolicy struct {
+ // DefaultServiceTypeDeltaHealthPolicy - The delta health policy used by default to evaluate the health of a service type when upgrading the cluster.
+ DefaultServiceTypeDeltaHealthPolicy *ServiceTypeDeltaHealthPolicy `json:"defaultServiceTypeDeltaHealthPolicy,omitempty"`
+ // ServiceTypeDeltaHealthPolicies - The map with service type delta health policy per service type name. The map is empty by default.
+ ServiceTypeDeltaHealthPolicies map[string]*ServiceTypeDeltaHealthPolicy `json:"serviceTypeDeltaHealthPolicies"`
+}
+
+// MarshalJSON is the custom marshaler for ApplicationDeltaHealthPolicy.
+func (adhp ApplicationDeltaHealthPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if adhp.DefaultServiceTypeDeltaHealthPolicy != nil {
+ objectMap["defaultServiceTypeDeltaHealthPolicy"] = adhp.DefaultServiceTypeDeltaHealthPolicy
+ }
+ if adhp.ServiceTypeDeltaHealthPolicies != nil {
+ objectMap["serviceTypeDeltaHealthPolicies"] = adhp.ServiceTypeDeltaHealthPolicies
+ }
+ return json.Marshal(objectMap)
+}
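+
+// Illustrative sketch (not generated): the custom marshaler above only emits keys whose fields
+// are non-nil, so a policy with a nil ServiceTypeDeltaHealthPolicies map serializes without that
+// key, while an explicitly empty map is preserved. The helper name below is an assumption added
+// purely as an example of that behavior.
+func exampleApplicationDeltaHealthPolicyJSON() (withoutMap []byte, withEmptyMap []byte, err error) {
+ nilMap := ApplicationDeltaHealthPolicy{
+  DefaultServiceTypeDeltaHealthPolicy: &ServiceTypeDeltaHealthPolicy{},
+ }
+ emptyMap := ApplicationDeltaHealthPolicy{
+  DefaultServiceTypeDeltaHealthPolicy: &ServiceTypeDeltaHealthPolicy{},
+  ServiceTypeDeltaHealthPolicies:      map[string]*ServiceTypeDeltaHealthPolicy{},
+ }
+ if withoutMap, err = json.Marshal(nilMap); err != nil {
+  return nil, nil, err
+ }
+ if withEmptyMap, err = json.Marshal(emptyMap); err != nil {
+  return nil, nil, err
+ }
+ return withoutMap, withEmptyMap, nil
+}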
+
+// ApplicationHealthPolicy defines a health policy used to evaluate the health of an application or one of
+// its child entities.
+type ApplicationHealthPolicy struct {
+ // DefaultServiceTypeHealthPolicy - The health policy used by default to evaluate the health of a service type.
+ DefaultServiceTypeHealthPolicy *ServiceTypeHealthPolicy `json:"defaultServiceTypeHealthPolicy,omitempty"`
+ // ServiceTypeHealthPolicies - The map with service type health policy per service type name. The map is empty by default.
+ ServiceTypeHealthPolicies map[string]*ServiceTypeHealthPolicy `json:"serviceTypeHealthPolicies"`
+}
+
+// MarshalJSON is the custom marshaler for ApplicationHealthPolicy.
+func (ahp ApplicationHealthPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ahp.DefaultServiceTypeHealthPolicy != nil {
+ objectMap["defaultServiceTypeHealthPolicy"] = ahp.DefaultServiceTypeHealthPolicy
+ }
+ if ahp.ServiceTypeHealthPolicies != nil {
+ objectMap["serviceTypeHealthPolicies"] = ahp.ServiceTypeHealthPolicies
+ }
+ return json.Marshal(objectMap)
+}
+
+// ApplicationMetricDescription describes capacity information for a custom resource balancing metric. This
+// can be used to limit the total consumption of this metric by the services of this application.
+type ApplicationMetricDescription struct {
+ // Name - The name of the metric.
+ Name *string `json:"name,omitempty"`
+ // MaximumCapacity - The maximum node capacity for Service Fabric application.
+ // This is the maximum load for an instance of this application on a single node. Even if the capacity of a node is greater than this value, Service Fabric will limit the total load of services within the application on each node to this value.
+ // If set to zero, capacity for this metric is unlimited on each node.
+ // When creating a new application with application capacity defined, the product of MaximumNodes and this value must always be smaller than or equal to TotalApplicationCapacity.
+ // When updating an existing application with application capacity, the product of MaximumNodes and this value must always be smaller than or equal to TotalApplicationCapacity.
+ MaximumCapacity *int64 `json:"maximumCapacity,omitempty"`
+ // ReservationCapacity - The node reservation capacity for Service Fabric application.
+ // This is the amount of load which is reserved on nodes which have instances of this application.
+ // If MinimumNodes is specified, then the product of these values will be the capacity reserved in the cluster for the application.
+ // If set to zero, no capacity is reserved for this metric.
+ // When setting application capacity or when updating application capacity; this value must be smaller than or equal to MaximumCapacity for each metric.
+ ReservationCapacity *int64 `json:"reservationCapacity,omitempty"`
+ // TotalApplicationCapacity - The total metric capacity for Service Fabric application.
+ // This is the total metric capacity for this application in the cluster. Service Fabric will try to limit the sum of loads of services within the application to this value.
+ // When creating a new application with application capacity defined, the product of MaximumNodes and MaximumCapacity must always be smaller than or equal to this value.
+ TotalApplicationCapacity *int64 `json:"totalApplicationCapacity,omitempty"`
+}
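+
+// Worked example (illustrative, not generated): for an application capped at 5 nodes, the
+// constraints described above require MaximumNodes * MaximumCapacity <= TotalApplicationCapacity
+// (5 * 10 = 50 <= 50) and ReservationCapacity <= MaximumCapacity (2 <= 10). The metric name "CPU"
+// and the numbers are assumptions chosen only to satisfy those rules.
+func exampleCPUMetric() ApplicationMetricDescription {
+ return ApplicationMetricDescription{
+  Name:                     to.StringPtr("CPU"),
+  MaximumCapacity:          to.Int64Ptr(10), // per-node cap for this application's services
+  ReservationCapacity:      to.Int64Ptr(2),  // load reserved on each node hosting the application
+  TotalApplicationCapacity: to.Int64Ptr(50), // cluster-wide cap: 5 nodes * 10 per node
+ }
+}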
+
+// ApplicationResource the application resource.
+type ApplicationResource struct {
+ autorest.Response `json:"-"`
+ // ApplicationResourceProperties - The application resource properties.
+ *ApplicationResourceProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource identifier.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Azure resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Azure resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Azure resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ApplicationResource.
+func (ar ApplicationResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ar.ApplicationResourceProperties != nil {
+ objectMap["properties"] = ar.ApplicationResourceProperties
+ }
+ if ar.Location != nil {
+ objectMap["location"] = ar.Location
+ }
+ if ar.Tags != nil {
+ objectMap["tags"] = ar.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ApplicationResource struct.
+func (ar *ApplicationResource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var applicationResourceProperties ApplicationResourceProperties
+ err = json.Unmarshal(*v, &applicationResourceProperties)
+ if err != nil {
+ return err
+ }
+ ar.ApplicationResourceProperties = &applicationResourceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ ar.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ar.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ar.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ ar.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ ar.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ ar.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
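+
+// Illustrative round trip (not generated): MarshalJSON above deliberately omits the READ-ONLY
+// fields (ID, Name, Type, Etag) and nests the embedded ApplicationResourceProperties under
+// "properties"; UnmarshalJSON reverses that flattening when a response body is decoded. The
+// helper and the placeholder values below are assumptions added purely as an example.
+func exampleApplicationResourceRoundTrip() (*ApplicationResource, error) {
+ in := ApplicationResource{
+  Location: to.StringPtr("eastus"),
+  Tags:     map[string]*string{"env": to.StringPtr("test")},
+  ApplicationResourceProperties: &ApplicationResourceProperties{
+   TypeName:    to.StringPtr("myAppType"),
+   TypeVersion: to.StringPtr("1.0.0"),
+  },
+ }
+ body, err := json.Marshal(in) // READ-ONLY fields are dropped here
+ if err != nil {
+  return nil, err
+ }
+ out := ApplicationResource{}
+ if err := json.Unmarshal(body, &out); err != nil {
+  return nil, err
+ }
+ return &out, nil
+}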
+
+// ApplicationResourceList the list of application resources.
+type ApplicationResourceList struct {
+ autorest.Response `json:"-"`
+ Value *[]ApplicationResource `json:"value,omitempty"`
+ // NextLink - READ-ONLY; URL to get the next set of application list results if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ApplicationResourceProperties the application resource properties.
+type ApplicationResourceProperties struct {
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // TypeName - The application type name as defined in the application manifest.
+ TypeName *string `json:"typeName,omitempty"`
+ // TypeVersion - The version of the application type as defined in the application manifest.
+ TypeVersion *string `json:"typeVersion,omitempty"`
+ // Parameters - List of application parameters with overridden values from their default values specified in the application manifest.
+ Parameters map[string]*string `json:"parameters"`
+ // UpgradePolicy - Describes the policy for a monitored application upgrade.
+ UpgradePolicy *ApplicationUpgradePolicy `json:"upgradePolicy,omitempty"`
+ // MinimumNodes - The minimum number of nodes where Service Fabric will reserve capacity for this application. Note that this does not mean that the services of this application will be placed on all of those nodes. If this property is set to zero, no capacity will be reserved. The value of this property cannot be more than the value of the MaximumNodes property.
+ MinimumNodes *int64 `json:"minimumNodes,omitempty"`
+ // MaximumNodes - The maximum number of nodes where Service Fabric will reserve capacity for this application. Note that this does not mean that the services of this application will be placed on all of those nodes. By default, the value of this property is zero and it means that the services can be placed on any node.
+ MaximumNodes *int64 `json:"maximumNodes,omitempty"`
+ // RemoveApplicationCapacity - Remove the current application capacity settings.
+ RemoveApplicationCapacity *bool `json:"removeApplicationCapacity,omitempty"`
+ // Metrics - List of application capacity metric descriptions.
+ Metrics *[]ApplicationMetricDescription `json:"metrics,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ApplicationResourceProperties.
+func (arp ApplicationResourceProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if arp.TypeName != nil {
+ objectMap["typeName"] = arp.TypeName
+ }
+ if arp.TypeVersion != nil {
+ objectMap["typeVersion"] = arp.TypeVersion
+ }
+ if arp.Parameters != nil {
+ objectMap["parameters"] = arp.Parameters
+ }
+ if arp.UpgradePolicy != nil {
+ objectMap["upgradePolicy"] = arp.UpgradePolicy
+ }
+ if arp.MinimumNodes != nil {
+ objectMap["minimumNodes"] = arp.MinimumNodes
+ }
+ if arp.MaximumNodes != nil {
+ objectMap["maximumNodes"] = arp.MaximumNodes
+ }
+ if arp.RemoveApplicationCapacity != nil {
+ objectMap["removeApplicationCapacity"] = arp.RemoveApplicationCapacity
+ }
+ if arp.Metrics != nil {
+ objectMap["metrics"] = arp.Metrics
+ }
+ return json.Marshal(objectMap)
+}
+
+// ApplicationResourceUpdate the application resource for patch operations.
+type ApplicationResourceUpdate struct {
+ // ApplicationResourceUpdateProperties - The application resource properties for patch operations.
+ *ApplicationResourceUpdateProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource identifier.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Azure resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Azure resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Azure resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ApplicationResourceUpdate.
+func (aru ApplicationResourceUpdate) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if aru.ApplicationResourceUpdateProperties != nil {
+ objectMap["properties"] = aru.ApplicationResourceUpdateProperties
+ }
+ if aru.Location != nil {
+ objectMap["location"] = aru.Location
+ }
+ if aru.Tags != nil {
+ objectMap["tags"] = aru.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ApplicationResourceUpdate struct.
+func (aru *ApplicationResourceUpdate) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var applicationResourceUpdateProperties ApplicationResourceUpdateProperties
+ err = json.Unmarshal(*v, &applicationResourceUpdateProperties)
+ if err != nil {
+ return err
+ }
+ aru.ApplicationResourceUpdateProperties = &applicationResourceUpdateProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ aru.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ aru.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ aru.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ aru.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ aru.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ aru.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
+
+// ApplicationResourceUpdateProperties the application resource properties for patch operations.
+type ApplicationResourceUpdateProperties struct {
+ // TypeVersion - The version of the application type as defined in the application manifest.
+ TypeVersion *string `json:"typeVersion,omitempty"`
+ // Parameters - List of application parameters with overridden values from their default values specified in the application manifest.
+ Parameters map[string]*string `json:"parameters"`
+ // UpgradePolicy - Describes the policy for a monitored application upgrade.
+ UpgradePolicy *ApplicationUpgradePolicy `json:"upgradePolicy,omitempty"`
+ // MinimumNodes - The minimum number of nodes where Service Fabric will reserve capacity for this application. Note that this does not mean that the services of this application will be placed on all of those nodes. If this property is set to zero, no capacity will be reserved. The value of this property cannot be more than the value of the MaximumNodes property.
+ MinimumNodes *int64 `json:"minimumNodes,omitempty"`
+ // MaximumNodes - The maximum number of nodes where Service Fabric will reserve capacity for this application. Note that this does not mean that the services of this application will be placed on all of those nodes. By default, the value of this property is zero and it means that the services can be placed on any node.
+ MaximumNodes *int64 `json:"maximumNodes,omitempty"`
+ // RemoveApplicationCapacity - Remove the current application capacity settings.
+ RemoveApplicationCapacity *bool `json:"removeApplicationCapacity,omitempty"`
+ // Metrics - List of application capacity metric descriptions.
+ Metrics *[]ApplicationMetricDescription `json:"metrics,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ApplicationResourceUpdateProperties.
+func (arup ApplicationResourceUpdateProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if arup.TypeVersion != nil {
+ objectMap["typeVersion"] = arup.TypeVersion
+ }
+ if arup.Parameters != nil {
+ objectMap["parameters"] = arup.Parameters
+ }
+ if arup.UpgradePolicy != nil {
+ objectMap["upgradePolicy"] = arup.UpgradePolicy
+ }
+ if arup.MinimumNodes != nil {
+ objectMap["minimumNodes"] = arup.MinimumNodes
+ }
+ if arup.MaximumNodes != nil {
+ objectMap["maximumNodes"] = arup.MaximumNodes
+ }
+ if arup.RemoveApplicationCapacity != nil {
+ objectMap["removeApplicationCapacity"] = arup.RemoveApplicationCapacity
+ }
+ if arup.Metrics != nil {
+ objectMap["metrics"] = arup.Metrics
+ }
+ return json.Marshal(objectMap)
+}
+
+// ApplicationsCreateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type ApplicationsCreateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ApplicationsCreateFuture) Result(client ApplicationsClient) (ar ApplicationResource, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsCreateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ApplicationsCreateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if ar.Response.Response, err = future.GetResult(sender); err == nil && ar.Response.Response.StatusCode != http.StatusNoContent {
+ ar, err = client.CreateResponder(ar.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsCreateFuture", "Result", ar.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
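+
+// Usage sketch (illustrative, not generated): a future returned by the Create call is normally
+// blocked on with WaitForCompletionRef from the embedded azure.Future and then unwrapped with
+// Result. The helper name is an assumption; cancellation and retry tuning are left to the caller.
+func waitForApplicationCreate(ctx context.Context, client ApplicationsClient, future ApplicationsCreateFuture) (ApplicationResource, error) {
+ if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+  return ApplicationResource{}, err
+ }
+ return future.Result(client)
+}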
+
+// ApplicationsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type ApplicationsDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ApplicationsDeleteFuture) Result(client ApplicationsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ApplicationsDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// ApplicationsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type ApplicationsUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ApplicationsUpdateFuture) Result(client ApplicationsClient) (ar ApplicationResource, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ApplicationsUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if ar.Response.Response, err = future.GetResult(sender); err == nil && ar.Response.Response.StatusCode != http.StatusNoContent {
+ ar, err = client.UpdateResponder(ar.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsUpdateFuture", "Result", ar.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// ApplicationTypeResource the application type name resource
+type ApplicationTypeResource struct {
+ autorest.Response `json:"-"`
+ // ApplicationTypeResourceProperties - The application type name properties
+ *ApplicationTypeResourceProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource identifier.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Azure resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Azure resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Azure resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ApplicationTypeResource.
+func (atr ApplicationTypeResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if atr.ApplicationTypeResourceProperties != nil {
+ objectMap["properties"] = atr.ApplicationTypeResourceProperties
+ }
+ if atr.Location != nil {
+ objectMap["location"] = atr.Location
+ }
+ if atr.Tags != nil {
+ objectMap["tags"] = atr.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ApplicationTypeResource struct.
+func (atr *ApplicationTypeResource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var applicationTypeResourceProperties ApplicationTypeResourceProperties
+ err = json.Unmarshal(*v, &applicationTypeResourceProperties)
+ if err != nil {
+ return err
+ }
+ atr.ApplicationTypeResourceProperties = &applicationTypeResourceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ atr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ atr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ atr.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ atr.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ atr.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ atr.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
+
+// ApplicationTypeResourceList the list of application type names.
+type ApplicationTypeResourceList struct {
+ autorest.Response `json:"-"`
+ Value *[]ApplicationTypeResource `json:"value,omitempty"`
+ // NextLink - READ-ONLY; URL to get the next set of application type list results if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ApplicationTypeResourceProperties the application type name properties
+type ApplicationTypeResourceProperties struct {
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// ApplicationTypesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type ApplicationTypesDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ApplicationTypesDeleteFuture) Result(client ApplicationTypesClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ApplicationTypesDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// ApplicationTypeVersionResource an application type version resource for the specified application type
+// name resource.
+type ApplicationTypeVersionResource struct {
+ autorest.Response `json:"-"`
+ // ApplicationTypeVersionResourceProperties - The properties of the application type version resource.
+ *ApplicationTypeVersionResourceProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource identifier.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Azure resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Azure resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Azure resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ApplicationTypeVersionResource.
+func (atvr ApplicationTypeVersionResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if atvr.ApplicationTypeVersionResourceProperties != nil {
+ objectMap["properties"] = atvr.ApplicationTypeVersionResourceProperties
+ }
+ if atvr.Location != nil {
+ objectMap["location"] = atvr.Location
+ }
+ if atvr.Tags != nil {
+ objectMap["tags"] = atvr.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ApplicationTypeVersionResource struct.
+func (atvr *ApplicationTypeVersionResource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var applicationTypeVersionResourceProperties ApplicationTypeVersionResourceProperties
+ err = json.Unmarshal(*v, &applicationTypeVersionResourceProperties)
+ if err != nil {
+ return err
+ }
+ atvr.ApplicationTypeVersionResourceProperties = &applicationTypeVersionResourceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ atvr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ atvr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ atvr.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ atvr.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ atvr.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ atvr.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
+
+// ApplicationTypeVersionResourceList the list of application type version resources for the specified
+// application type name resource.
+type ApplicationTypeVersionResourceList struct {
+ autorest.Response `json:"-"`
+ Value *[]ApplicationTypeVersionResource `json:"value,omitempty"`
+ // NextLink - READ-ONLY; URL to get the next set of application type version list results if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ApplicationTypeVersionResourceProperties the properties of the application type version resource.
+type ApplicationTypeVersionResourceProperties struct {
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // AppPackageURL - The URL to the application package
+ AppPackageURL *string `json:"appPackageUrl,omitempty"`
+ // DefaultParameterList - READ-ONLY; List of application type parameters that can be overridden when creating or updating the application.
+ DefaultParameterList map[string]*string `json:"defaultParameterList"`
+}
+
+// MarshalJSON is the custom marshaler for ApplicationTypeVersionResourceProperties.
+func (atvrp ApplicationTypeVersionResourceProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if atvrp.AppPackageURL != nil {
+ objectMap["appPackageUrl"] = atvrp.AppPackageURL
+ }
+ return json.Marshal(objectMap)
+}
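+
+// Illustrative sketch (not generated): when creating an application type version, AppPackageURL is
+// the only writable property; ProvisioningState and DefaultParameterList are READ-ONLY, and the
+// custom marshaler above drops them from request bodies. The location and package URL below are
+// placeholder assumptions.
+func exampleApplicationTypeVersion() ApplicationTypeVersionResource {
+ return ApplicationTypeVersionResource{
+  Location: to.StringPtr("eastus"),
+  ApplicationTypeVersionResourceProperties: &ApplicationTypeVersionResourceProperties{
+   AppPackageURL: to.StringPtr("https://example.blob.core.windows.net/apps/MyApp.1.0.0.sfpkg"),
+  },
+ }
+}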
+
+// ApplicationTypeVersionsCreateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ApplicationTypeVersionsCreateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ApplicationTypeVersionsCreateFuture) Result(client ApplicationTypeVersionsClient) (atvr ApplicationTypeVersionResource, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsCreateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ApplicationTypeVersionsCreateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if atvr.Response.Response, err = future.GetResult(sender); err == nil && atvr.Response.Response.StatusCode != http.StatusNoContent {
+ atvr, err = client.CreateResponder(atvr.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsCreateFuture", "Result", atvr.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// ApplicationTypeVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ApplicationTypeVersionsDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ApplicationTypeVersionsDeleteFuture) Result(client ApplicationTypeVersionsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ApplicationTypeVersionsDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// ApplicationUpgradePolicy describes the policy for a monitored application upgrade.
+type ApplicationUpgradePolicy struct {
+ // UpgradeReplicaSetCheckTimeout - The maximum amount of time to block processing of an upgrade domain and prevent loss of availability when there are unexpected issues. When this timeout expires, processing of the upgrade domain will proceed regardless of availability loss issues. The timeout is reset at the start of each upgrade domain. Valid values are between 0 and 4294967295 (unsigned 32-bit integer), inclusive.
+ UpgradeReplicaSetCheckTimeout *string `json:"upgradeReplicaSetCheckTimeout,omitempty"`
+ // ForceRestart - If true, then processes are forcefully restarted during upgrade even when the code version has not changed (the upgrade only changes configuration or data).
+ ForceRestart *bool `json:"forceRestart,omitempty"`
+ // RollingUpgradeMonitoringPolicy - The policy used for monitoring the application upgrade
+ RollingUpgradeMonitoringPolicy *ArmRollingUpgradeMonitoringPolicy `json:"rollingUpgradeMonitoringPolicy,omitempty"`
+ // ApplicationHealthPolicy - Defines a health policy used to evaluate the health of an application or one of its child entities.
+ ApplicationHealthPolicy *ArmApplicationHealthPolicy `json:"applicationHealthPolicy,omitempty"`
+}
+
+// ArmApplicationHealthPolicy defines a health policy used to evaluate the health of an application or one
+// of its child entities.
+type ArmApplicationHealthPolicy struct {
+ // ConsiderWarningAsError - Indicates whether warnings are treated with the same severity as errors.
+ ConsiderWarningAsError *bool `json:"considerWarningAsError,omitempty"`
+ // MaxPercentUnhealthyDeployedApplications - The maximum allowed percentage of unhealthy deployed applications. Allowed values are Byte values from zero to 100.
+ // The percentage represents the maximum tolerated percentage of deployed applications that can be unhealthy before the application is considered in error.
+ // This is calculated by dividing the number of unhealthy deployed applications by the number of nodes where the application is currently deployed in the cluster.
+ // The computation rounds up to tolerate one failure on small numbers of nodes. Default percentage is zero.
+ MaxPercentUnhealthyDeployedApplications *int32 `json:"maxPercentUnhealthyDeployedApplications,omitempty"`
+ // DefaultServiceTypeHealthPolicy - The health policy used by default to evaluate the health of a service type.
+ DefaultServiceTypeHealthPolicy *ArmServiceTypeHealthPolicy `json:"defaultServiceTypeHealthPolicy,omitempty"`
+ // ServiceTypeHealthPolicyMap - The map with service type health policy per service type name. The map is empty by default.
+ ServiceTypeHealthPolicyMap map[string]*ArmServiceTypeHealthPolicy `json:"serviceTypeHealthPolicyMap"`
+}
+
+// MarshalJSON is the custom marshaler for ArmApplicationHealthPolicy.
+func (aahp ArmApplicationHealthPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if aahp.ConsiderWarningAsError != nil {
+ objectMap["considerWarningAsError"] = aahp.ConsiderWarningAsError
+ }
+ if aahp.MaxPercentUnhealthyDeployedApplications != nil {
+ objectMap["maxPercentUnhealthyDeployedApplications"] = aahp.MaxPercentUnhealthyDeployedApplications
+ }
+ if aahp.DefaultServiceTypeHealthPolicy != nil {
+ objectMap["defaultServiceTypeHealthPolicy"] = aahp.DefaultServiceTypeHealthPolicy
+ }
+ if aahp.ServiceTypeHealthPolicyMap != nil {
+ objectMap["serviceTypeHealthPolicyMap"] = aahp.ServiceTypeHealthPolicyMap
+ }
+ return json.Marshal(objectMap)
+}
+
+// ArmRollingUpgradeMonitoringPolicy the policy used for monitoring the application upgrade
+type ArmRollingUpgradeMonitoringPolicy struct {
+ // FailureAction - The action to perform if the upgrade fails: roll back automatically or wait for manual repair by the administrator. Possible values include: 'Rollback', 'Manual'
+ FailureAction ArmUpgradeFailureAction `json:"failureAction,omitempty"`
+ // HealthCheckWaitDuration - The amount of time to wait after completing an upgrade domain before applying health policies. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
+ HealthCheckWaitDuration *string `json:"healthCheckWaitDuration,omitempty"`
+ // HealthCheckStableDuration - The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
+ HealthCheckStableDuration *string `json:"healthCheckStableDuration,omitempty"`
+ // HealthCheckRetryTimeout - The amount of time to retry health evaluation when the application or cluster is unhealthy before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
+ HealthCheckRetryTimeout *string `json:"healthCheckRetryTimeout,omitempty"`
+ // UpgradeTimeout - The amount of time the overall upgrade has to complete before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
+ UpgradeTimeout *string `json:"upgradeTimeout,omitempty"`
+ // UpgradeDomainTimeout - The amount of time each upgrade domain has to complete before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
+ UpgradeDomainTimeout *string `json:"upgradeDomainTimeout,omitempty"`
+}
+
+// ArmServiceTypeHealthPolicy represents the health policy used to evaluate the health of services
+// belonging to a service type.
+type ArmServiceTypeHealthPolicy struct {
+ // MaxPercentUnhealthyServices - The maximum percentage of services allowed to be unhealthy before your application is considered in error.
+ MaxPercentUnhealthyServices *int32 `json:"maxPercentUnhealthyServices,omitempty"`
+ // MaxPercentUnhealthyPartitionsPerService - The maximum percentage of partitions per service allowed to be unhealthy before your application is considered in error.
+ MaxPercentUnhealthyPartitionsPerService *int32 `json:"maxPercentUnhealthyPartitionsPerService,omitempty"`
+ // MaxPercentUnhealthyReplicasPerPartition - The maximum percentage of replicas per partition allowed to be unhealthy before your application is considered in error.
+ MaxPercentUnhealthyReplicasPerPartition *int32 `json:"maxPercentUnhealthyReplicasPerPartition,omitempty"`
+}
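+
+// Composition sketch (illustrative, not generated): an ApplicationUpgradePolicy is assembled from
+// the monitoring and health policy types defined above. The ISO 8601 durations, timeout, and
+// percentage thresholds below are placeholder assumptions, not recommended defaults.
+func exampleUpgradePolicy() ApplicationUpgradePolicy {
+ return ApplicationUpgradePolicy{
+  UpgradeReplicaSetCheckTimeout: to.StringPtr("3600"),
+  ForceRestart:                  to.BoolPtr(false),
+  RollingUpgradeMonitoringPolicy: &ArmRollingUpgradeMonitoringPolicy{
+   FailureAction:             Rollback,
+   HealthCheckWaitDuration:   to.StringPtr("PT0H10M0S"), // ISO 8601 durations, per the field comments
+   HealthCheckStableDuration: to.StringPtr("PT0H5M0S"),
+   HealthCheckRetryTimeout:   to.StringPtr("PT0H30M0S"),
+   UpgradeTimeout:            to.StringPtr("PT2H0M0S"),
+   UpgradeDomainTimeout:      to.StringPtr("PT1H0M0S"),
+  },
+  ApplicationHealthPolicy: &ArmApplicationHealthPolicy{
+   ConsiderWarningAsError:                  to.BoolPtr(false),
+   MaxPercentUnhealthyDeployedApplications: to.Int32Ptr(0),
+   DefaultServiceTypeHealthPolicy: &ArmServiceTypeHealthPolicy{
+    MaxPercentUnhealthyServices: to.Int32Ptr(0),
+   },
+  },
+ }
+}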
+
+// AvailableOperationDisplay operation supported by the Service Fabric resource provider
+type AvailableOperationDisplay struct {
+ // Provider - The name of the provider.
+ Provider *string `json:"provider,omitempty"`
+ // Resource - The resource on which the operation is performed
+ Resource *string `json:"resource,omitempty"`
+ // Operation - The operation that can be performed.
+ Operation *string `json:"operation,omitempty"`
+ // Description - Operation description
+ Description *string `json:"description,omitempty"`
+}
+
+// AzureActiveDirectory the settings to enable AAD authentication on the cluster.
+type AzureActiveDirectory struct {
+ // TenantID - Azure Active Directory tenant id.
+ TenantID *string `json:"tenantId,omitempty"`
+ // ClusterApplication - Azure Active Directory cluster application id.
+ ClusterApplication *string `json:"clusterApplication,omitempty"`
+ // ClientApplication - Azure Active Directory client application id.
+ ClientApplication *string `json:"clientApplication,omitempty"`
+}
+
+// CertificateDescription describes the certificate details.
+type CertificateDescription struct {
+ // Thumbprint - Thumbprint of the primary certificate.
+ Thumbprint *string `json:"thumbprint,omitempty"`
+ // ThumbprintSecondary - Thumbprint of the secondary certificate.
+ ThumbprintSecondary *string `json:"thumbprintSecondary,omitempty"`
+ // X509StoreName - The local certificate store location. Possible values include: 'AddressBook', 'AuthRoot', 'CertificateAuthority', 'Disallowed', 'My', 'Root', 'TrustedPeople', 'TrustedPublisher'
+ X509StoreName X509StoreName `json:"x509StoreName,omitempty"`
+}
+
+// ClientCertificateCommonName describes the client certificate details using common name.
+type ClientCertificateCommonName struct {
+ // IsAdmin - Indicates if the client certificate has admin access to the cluster. Non-admin clients can perform only read-only operations on the cluster.
+ IsAdmin *bool `json:"isAdmin,omitempty"`
+ // CertificateCommonName - The common name of the client certificate.
+ CertificateCommonName *string `json:"certificateCommonName,omitempty"`
+ // CertificateIssuerThumbprint - The issuer thumbprint of the client certificate.
+ CertificateIssuerThumbprint *string `json:"certificateIssuerThumbprint,omitempty"`
+}
+
+// ClientCertificateThumbprint describes the client certificate details using thumbprint.
+type ClientCertificateThumbprint struct {
+ // IsAdmin - Indicates if the client certificate has admin access to the cluster. Non-admin clients can perform only read-only operations on the cluster.
+ IsAdmin *bool `json:"isAdmin,omitempty"`
+ // CertificateThumbprint - The thumbprint of the client certificate.
+ CertificateThumbprint *string `json:"certificateThumbprint,omitempty"`
+}
+
+// Cluster the cluster resource
+type Cluster struct {
+ autorest.Response `json:"-"`
+ // ClusterProperties - The cluster resource properties
+ *ClusterProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource identifier.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Azure resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Azure resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Azure resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Cluster.
+func (c Cluster) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if c.ClusterProperties != nil {
+ objectMap["properties"] = c.ClusterProperties
+ }
+ if c.Location != nil {
+ objectMap["location"] = c.Location
+ }
+ if c.Tags != nil {
+ objectMap["tags"] = c.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Cluster struct.
+func (c *Cluster) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var clusterProperties ClusterProperties
+ err = json.Unmarshal(*v, &clusterProperties)
+ if err != nil {
+ return err
+ }
+ c.ClusterProperties = &clusterProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ c.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ c.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ c.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ c.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ c.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ c.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
+
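+// Example (illustrative sketch, not part of the generated code): marshaling a
+// Cluster shows how the custom marshaler above flattens the embedded
+// *ClusterProperties into the "properties" JSON object and omits the READ-ONLY
+// ID, Name, Type and Etag fields. The location, tag and endpoint values below
+// are made-up placeholders.
+func exampleClusterPayload() ([]byte, error) {
+ c := Cluster{
+  Location: to.StringPtr("eastus"),
+  Tags:     map[string]*string{"env": to.StringPtr("test")},
+  ClusterProperties: &ClusterProperties{
+   ManagementEndpoint: to.StringPtr("https://contoso.eastus.cloudapp.azure.com:19080"),
+   UpgradeMode:        UpgradeModeAutomatic,
+  },
+ }
+ // Produces {"properties":{...},"location":"eastus","tags":{"env":"test"}}.
+ return json.Marshal(c)
+}
+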
+// ClusterCodeVersionsListResult the list results of the Service Fabric runtime versions.
+type ClusterCodeVersionsListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]ClusterCodeVersionsResult `json:"value,omitempty"`
+ // NextLink - The URL to use for getting the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ClusterCodeVersionsResult the result of the Service Fabric runtime versions
+type ClusterCodeVersionsResult struct {
+ // ID - The identification of the result
+ ID *string `json:"id,omitempty"`
+ // Name - The name of the result
+ Name *string `json:"name,omitempty"`
+ // Type - The result resource type
+ Type *string `json:"type,omitempty"`
+ // ClusterVersionDetails - The detail of the Service Fabric runtime version result
+ *ClusterVersionDetails `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ClusterCodeVersionsResult.
+func (ccvr ClusterCodeVersionsResult) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ccvr.ID != nil {
+ objectMap["id"] = ccvr.ID
+ }
+ if ccvr.Name != nil {
+ objectMap["name"] = ccvr.Name
+ }
+ if ccvr.Type != nil {
+ objectMap["type"] = ccvr.Type
+ }
+ if ccvr.ClusterVersionDetails != nil {
+ objectMap["properties"] = ccvr.ClusterVersionDetails
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ClusterCodeVersionsResult struct.
+func (ccvr *ClusterCodeVersionsResult) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ ccvr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ccvr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ccvr.Type = &typeVar
+ }
+ case "properties":
+ if v != nil {
+ var clusterVersionDetails ClusterVersionDetails
+ err = json.Unmarshal(*v, &clusterVersionDetails)
+ if err != nil {
+ return err
+ }
+ ccvr.ClusterVersionDetails = &clusterVersionDetails
+ }
+ }
+ }
+
+ return nil
+}
+
+// ClusterHealthPolicy defines a health policy used to evaluate the health of the cluster or of a cluster
+// node.
+type ClusterHealthPolicy struct {
+ // MaxPercentUnhealthyNodes - The maximum allowed percentage of unhealthy nodes before reporting an error. For example, to allow 10% of nodes to be unhealthy, this value would be 10.
+ // The percentage represents the maximum tolerated percentage of nodes that can be unhealthy before the cluster is considered in error.
+ // If the percentage is respected but there is at least one unhealthy node, the health is evaluated as Warning.
+ // The percentage is calculated by dividing the number of unhealthy nodes by the total number of nodes in the cluster.
+ // The computation rounds up to tolerate one failure on small numbers of nodes. Default percentage is zero.
+ // In large clusters, some nodes will always be down or out for repairs, so this percentage should be configured to tolerate that.
+ MaxPercentUnhealthyNodes *int32 `json:"maxPercentUnhealthyNodes,omitempty"`
+ // MaxPercentUnhealthyApplications - The maximum allowed percentage of unhealthy applications before reporting an error. For example, to allow 10% of applications to be unhealthy, this value would be 10.
+ // The percentage represents the maximum tolerated percentage of applications that can be unhealthy before the cluster is considered in error.
+ // If the percentage is respected but there is at least one unhealthy application, the health is evaluated as Warning.
+ // This is calculated by dividing the number of unhealthy applications by the total number of application instances in the cluster, excluding applications of application types that are included in the ApplicationTypeHealthPolicyMap.
+ // The computation rounds up to tolerate one failure on small numbers of applications. Default percentage is zero.
+ MaxPercentUnhealthyApplications *int32 `json:"maxPercentUnhealthyApplications,omitempty"`
+ // ApplicationHealthPolicies - Defines the application health policy map used to evaluate the health of an application or one of its children entities.
+ ApplicationHealthPolicies map[string]*ApplicationHealthPolicy `json:"applicationHealthPolicies"`
+}
+
+// MarshalJSON is the custom marshaler for ClusterHealthPolicy.
+func (chp ClusterHealthPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if chp.MaxPercentUnhealthyNodes != nil {
+ objectMap["maxPercentUnhealthyNodes"] = chp.MaxPercentUnhealthyNodes
+ }
+ if chp.MaxPercentUnhealthyApplications != nil {
+ objectMap["maxPercentUnhealthyApplications"] = chp.MaxPercentUnhealthyApplications
+ }
+ if chp.ApplicationHealthPolicies != nil {
+ objectMap["applicationHealthPolicies"] = chp.ApplicationHealthPolicies
+ }
+ return json.Marshal(objectMap)
+}
+
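+// Example (illustrative sketch, not part of the generated code): the percentage
+// fields above are plain *int32 values, so the to helpers from
+// github.com/Azure/go-autorest/autorest/to are a convenient way to populate them.
+// Tolerating 10% unhealthy nodes and no unhealthy applications looks like this:
+func exampleClusterHealthPolicy() ClusterHealthPolicy {
+ return ClusterHealthPolicy{
+  MaxPercentUnhealthyNodes:        to.Int32Ptr(10),
+  MaxPercentUnhealthyApplications: to.Int32Ptr(0),
+ }
+}
+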
+// ClusterListResult cluster list results
+type ClusterListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]Cluster `json:"value,omitempty"`
+ // NextLink - The URL to use for getting the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ClusterProperties describes the cluster resource properties.
+type ClusterProperties struct {
+ // AddOnFeatures - The list of add-on features to enable in the cluster.
+ AddOnFeatures *[]string `json:"addOnFeatures,omitempty"`
+ // AvailableClusterVersions - READ-ONLY; The Service Fabric runtime versions available for this cluster.
+ AvailableClusterVersions *[]ClusterVersionDetails `json:"availableClusterVersions,omitempty"`
+ // AzureActiveDirectory - The AAD authentication settings of the cluster.
+ AzureActiveDirectory *AzureActiveDirectory `json:"azureActiveDirectory,omitempty"`
+ // Certificate - The certificate to use for securing the cluster. The certificate provided will be used for node-to-node security within the cluster, the SSL certificate for the cluster management endpoint, and the default admin client.
+ Certificate *CertificateDescription `json:"certificate,omitempty"`
+ // CertificateCommonNames - Describes a list of server certificates referenced by common name that are used to secure the cluster.
+ CertificateCommonNames *ServerCertificateCommonNames `json:"certificateCommonNames,omitempty"`
+ // ClientCertificateCommonNames - The list of client certificates referenced by common name that are allowed to manage the cluster.
+ ClientCertificateCommonNames *[]ClientCertificateCommonName `json:"clientCertificateCommonNames,omitempty"`
+ // ClientCertificateThumbprints - The list of client certificates referenced by thumbprint that are allowed to manage the cluster.
+ ClientCertificateThumbprints *[]ClientCertificateThumbprint `json:"clientCertificateThumbprints,omitempty"`
+ // ClusterCodeVersion - The Service Fabric runtime version of the cluster. This property can only be set by the user when **upgradeMode** is set to 'Manual'. To get the list of available Service Fabric versions for new clusters use [ClusterVersion API](./ClusterVersion.md). To get the list of available versions for existing clusters use **availableClusterVersions**.
+ ClusterCodeVersion *string `json:"clusterCodeVersion,omitempty"`
+ // ClusterEndpoint - READ-ONLY; The Azure Resource Provider endpoint. A system service in the cluster connects to this endpoint.
+ ClusterEndpoint *string `json:"clusterEndpoint,omitempty"`
+ // ClusterID - READ-ONLY; A service generated unique identifier for the cluster resource.
+ ClusterID *string `json:"clusterId,omitempty"`
+ // ClusterState - READ-ONLY; The current state of the cluster.
+ // - WaitingForNodes - Indicates that the cluster resource is created and the resource provider is waiting for the Service Fabric VM extension to boot up and report to it.
+ // - Deploying - Indicates that the Service Fabric runtime is being installed on the VMs. The cluster resource will be in this state until the cluster boots up and system services are up.
+ // - BaselineUpgrade - Indicates that the cluster is upgrading to establish the cluster version. This upgrade is automatically initiated when the cluster boots up for the first time.
+ // - UpdatingUserConfiguration - Indicates that the cluster is being upgraded with the user provided configuration.
+ // - UpdatingUserCertificate - Indicates that the cluster is being upgraded with the user provided certificate.
+ // - UpdatingInfrastructure - Indicates that the cluster is being upgraded with the latest Service Fabric runtime version. This happens only when the **upgradeMode** is set to 'Automatic'.
+ // - EnforcingClusterVersion - Indicates that the cluster is on a different version than expected and the cluster is being upgraded to the expected version.
+ // - UpgradeServiceUnreachable - Indicates that the system service in the cluster is no longer polling the Resource Provider. Clusters in this state cannot be managed by the Resource Provider.
+ // - AutoScale - Indicates that the ReliabilityLevel of the cluster is being adjusted.
+ // - Ready - Indicates that the cluster is in a stable state.
+ // . Possible values include: 'WaitingForNodes', 'Deploying', 'BaselineUpgrade', 'UpdatingUserConfiguration', 'UpdatingUserCertificate', 'UpdatingInfrastructure', 'EnforcingClusterVersion', 'UpgradeServiceUnreachable', 'AutoScale', 'Ready'
+ ClusterState ClusterState `json:"clusterState,omitempty"`
+ // DiagnosticsStorageAccountConfig - The storage account information for storing Service Fabric diagnostic logs.
+ DiagnosticsStorageAccountConfig *DiagnosticsStorageAccountConfig `json:"diagnosticsStorageAccountConfig,omitempty"`
+ // EventStoreServiceEnabled - Indicates if the event store service is enabled.
+ EventStoreServiceEnabled *bool `json:"eventStoreServiceEnabled,omitempty"`
+ // FabricSettings - The list of custom fabric settings to configure the cluster.
+ FabricSettings *[]SettingsSectionDescription `json:"fabricSettings,omitempty"`
+ // ManagementEndpoint - The HTTP management endpoint of the cluster.
+ ManagementEndpoint *string `json:"managementEndpoint,omitempty"`
+ // NodeTypes - The list of node types in the cluster.
+ NodeTypes *[]NodeTypeDescription `json:"nodeTypes,omitempty"`
+ // ProvisioningState - READ-ONLY; The provisioning state of the cluster resource. Possible values include: 'Updating', 'Succeeded', 'Failed', 'Canceled'
+ ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+ // ReliabilityLevel - The reliability level sets the replica set size of system services. Learn about [ReliabilityLevel](https://docs.microsoft.com/en-us/azure/service-fabric/service-fabric-cluster-capacity).
+ // - None - Run the System services with a target replica set count of 1. This should only be used for test clusters.
+ // - Bronze - Run the System services with a target replica set count of 3. This should only be used for test clusters.
+ // - Silver - Run the System services with a target replica set count of 5.
+ // - Gold - Run the System services with a target replica set count of 7.
+ // - Platinum - Run the System services with a target replica set count of 9.
+ // . Possible values include: 'ReliabilityLevelNone', 'ReliabilityLevelBronze', 'ReliabilityLevelSilver', 'ReliabilityLevelGold', 'ReliabilityLevelPlatinum'
+ ReliabilityLevel ReliabilityLevel `json:"reliabilityLevel,omitempty"`
+ // ReverseProxyCertificate - The server certificate used by reverse proxy.
+ ReverseProxyCertificate *CertificateDescription `json:"reverseProxyCertificate,omitempty"`
+ // ReverseProxyCertificateCommonNames - Describes a list of server certificates referenced by common name that are used to secure the cluster.
+ ReverseProxyCertificateCommonNames *ServerCertificateCommonNames `json:"reverseProxyCertificateCommonNames,omitempty"`
+ // UpgradeDescription - The policy to use when upgrading the cluster.
+ UpgradeDescription *ClusterUpgradePolicy `json:"upgradeDescription,omitempty"`
+ // UpgradeMode - The upgrade mode of the cluster when a new Service Fabric runtime version is available.
+ // - Automatic - The cluster will be automatically upgraded to the latest Service Fabric runtime version as soon as it is available.
+ // - Manual - The cluster will not be automatically upgraded to the latest Service Fabric runtime version. The cluster is upgraded by setting the **clusterCodeVersion** property in the cluster resource.
+ // . Possible values include: 'UpgradeModeAutomatic', 'UpgradeModeManual'
+ UpgradeMode UpgradeMode `json:"upgradeMode,omitempty"`
+ // VMImage - The VM image the VMSS has been configured with. Generic names such as Windows or Linux can be used.
+ VMImage *string `json:"vmImage,omitempty"`
+}
+
+// ClusterPropertiesUpdateParameters describes the cluster resource properties that can be updated during
+// PATCH operation.
+type ClusterPropertiesUpdateParameters struct {
+ // AddOnFeatures - The list of add-on features to enable in the cluster.
+ AddOnFeatures *[]string `json:"addOnFeatures,omitempty"`
+ // Certificate - The certificate to use for securing the cluster. The certificate provided will be used for node-to-node security within the cluster, the SSL certificate for the cluster management endpoint, and the default admin client.
+ Certificate *CertificateDescription `json:"certificate,omitempty"`
+ // CertificateCommonNames - Describes a list of server certificates referenced by common name that are used to secure the cluster.
+ CertificateCommonNames *ServerCertificateCommonNames `json:"certificateCommonNames,omitempty"`
+ // ClientCertificateCommonNames - The list of client certificates referenced by common name that are allowed to manage the cluster. This will overwrite the existing list.
+ ClientCertificateCommonNames *[]ClientCertificateCommonName `json:"clientCertificateCommonNames,omitempty"`
+ // ClientCertificateThumbprints - The list of client certificates referenced by thumbprint that are allowed to manage the cluster. This will overwrite the existing list.
+ ClientCertificateThumbprints *[]ClientCertificateThumbprint `json:"clientCertificateThumbprints,omitempty"`
+ // ClusterCodeVersion - The Service Fabric runtime version of the cluster. This property can only be set by the user when **upgradeMode** is set to 'Manual'. To get the list of available Service Fabric versions for new clusters use [ClusterVersion API](./ClusterVersion.md). To get the list of available versions for existing clusters use **availableClusterVersions**.
+ ClusterCodeVersion *string `json:"clusterCodeVersion,omitempty"`
+ // EventStoreServiceEnabled - Indicates if the event store service is enabled.
+ EventStoreServiceEnabled *bool `json:"eventStoreServiceEnabled,omitempty"`
+ // FabricSettings - The list of custom fabric settings to configure the cluster. This will overwrite the existing list.
+ FabricSettings *[]SettingsSectionDescription `json:"fabricSettings,omitempty"`
+ // NodeTypes - The list of node types in the cluster. This will overwrite the existing list.
+ NodeTypes *[]NodeTypeDescription `json:"nodeTypes,omitempty"`
+ // ReliabilityLevel - The reliability level sets the replica set size of system services. Learn about [ReliabilityLevel](https://docs.microsoft.com/en-us/azure/service-fabric/service-fabric-cluster-capacity).
+ // - None - Run the System services with a target replica set count of 1. This should only be used for test clusters.
+ // - Bronze - Run the System services with a target replica set count of 3. This should only be used for test clusters.
+ // - Silver - Run the System services with a target replica set count of 5.
+ // - Gold - Run the System services with a target replica set count of 7.
+ // - Platinum - Run the System services with a target replica set count of 9.
+ // . Possible values include: 'ReliabilityLevel1None', 'ReliabilityLevel1Bronze', 'ReliabilityLevel1Silver', 'ReliabilityLevel1Gold', 'ReliabilityLevel1Platinum'
+ ReliabilityLevel ReliabilityLevel1 `json:"reliabilityLevel,omitempty"`
+ // ReverseProxyCertificate - The server certificate used by reverse proxy.
+ ReverseProxyCertificate *CertificateDescription `json:"reverseProxyCertificate,omitempty"`
+ // UpgradeDescription - The policy to use when upgrading the cluster.
+ UpgradeDescription *ClusterUpgradePolicy `json:"upgradeDescription,omitempty"`
+ // UpgradeMode - The upgrade mode of the cluster when a new Service Fabric runtime version is available.
+ // - Automatic - The cluster will be automatically upgraded to the latest Service Fabric runtime version as soon as it is available.
+ // - Manual - The cluster will not be automatically upgraded to the latest Service Fabric runtime version. The cluster is upgraded by setting the **clusterCodeVersion** property in the cluster resource.
+ // . Possible values include: 'UpgradeMode1Automatic', 'UpgradeMode1Manual'
+ UpgradeMode UpgradeMode1 `json:"upgradeMode,omitempty"`
+}
+
+// ClustersCreateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type ClustersCreateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ClustersCreateFuture) Result(client ClustersClient) (c Cluster, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersCreateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ClustersCreateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if c.Response.Response, err = future.GetResult(sender); err == nil && c.Response.Response.StatusCode != http.StatusNoContent {
+ c, err = client.CreateResponder(c.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersCreateFuture", "Result", c.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// ClustersUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type ClustersUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ClustersUpdateFuture) Result(client ClustersClient) (c Cluster, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ClustersUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if c.Response.Response, err = future.GetResult(sender); err == nil && c.Response.Response.StatusCode != http.StatusNoContent {
+ c, err = client.UpdateResponder(c.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersUpdateFuture", "Result", c.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
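+// Example (illustrative sketch, not part of the generated code): a common way to
+// consume one of these futures is to block until the long-running operation
+// reaches a terminal state and then fetch the final Cluster. WaitForCompletionRef
+// is provided by the embedded azure.Future; how the future was obtained (for
+// example from a Create or Update call) is up to the caller.
+func waitForCluster(ctx context.Context, client ClustersClient, future ClustersUpdateFuture) (Cluster, error) {
+ // Poll until the operation completes or the context is cancelled.
+ if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+  return Cluster{}, err
+ }
+ // Retrieve and unmarshal the final resource.
+ return future.Result(client)
+}
+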
+// ClusterUpdateParameters cluster update request
+type ClusterUpdateParameters struct {
+ // ClusterPropertiesUpdateParameters - Describes the cluster resource properties that can be updated during PATCH operation.
+ *ClusterPropertiesUpdateParameters `json:"properties,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for ClusterUpdateParameters.
+func (cup ClusterUpdateParameters) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if cup.ClusterPropertiesUpdateParameters != nil {
+ objectMap["properties"] = cup.ClusterPropertiesUpdateParameters
+ }
+ if cup.Tags != nil {
+ objectMap["tags"] = cup.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ClusterUpdateParameters struct.
+func (cup *ClusterUpdateParameters) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var clusterPropertiesUpdateParameters ClusterPropertiesUpdateParameters
+ err = json.Unmarshal(*v, &clusterPropertiesUpdateParameters)
+ if err != nil {
+ return err
+ }
+ cup.ClusterPropertiesUpdateParameters = &clusterPropertiesUpdateParameters
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ cup.Tags = tags
+ }
+ }
+ }
+
+ return nil
+}
+
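+// Example (illustrative sketch, not part of the generated code): a minimal PATCH
+// payload built from ClusterUpdateParameters. Only the fields that are set end up
+// in the request body, thanks to the custom marshaler above; the add-on feature
+// name and tag value are placeholders.
+func exampleClusterUpdate() ClusterUpdateParameters {
+ return ClusterUpdateParameters{
+  ClusterPropertiesUpdateParameters: &ClusterPropertiesUpdateParameters{
+   AddOnFeatures: &[]string{"RepairManager"},
+  },
+  Tags: map[string]*string{"costCenter": to.StringPtr("1234")},
+ }
+}
+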
+// ClusterUpgradeDeltaHealthPolicy describes the delta health policies for the cluster upgrade.
+type ClusterUpgradeDeltaHealthPolicy struct {
+ // MaxPercentDeltaUnhealthyNodes - The maximum allowed percentage of node health degradation during cluster upgrades.
+ // The delta is measured between the state of the nodes at the beginning of upgrade and the state of the nodes at the time of the health evaluation.
+ // The check is performed after every upgrade domain upgrade completion to make sure the global state of the cluster is within tolerated limits.
+ MaxPercentDeltaUnhealthyNodes *int32 `json:"maxPercentDeltaUnhealthyNodes,omitempty"`
+ // MaxPercentUpgradeDomainDeltaUnhealthyNodes - The maximum allowed percentage of upgrade domain node health degradation during cluster upgrades.
+ // The delta is measured between the state of the upgrade domain nodes at the beginning of upgrade and the state of the upgrade domain nodes at the time of the health evaluation.
+ // The check is performed after every upgrade domain upgrade completion for all completed upgrade domains to make sure the state of the upgrade domains is within tolerated limits.
+ MaxPercentUpgradeDomainDeltaUnhealthyNodes *int32 `json:"maxPercentUpgradeDomainDeltaUnhealthyNodes,omitempty"`
+ // MaxPercentDeltaUnhealthyApplications - The maximum allowed percentage of application health degradation during cluster upgrades.
+ // The delta is measured between the state of the applications at the beginning of upgrade and the state of the applications at the time of the health evaluation.
+ // The check is performed after every upgrade domain upgrade completion to make sure the global state of the cluster is within tolerated limits. System services are not included in this.
+ MaxPercentDeltaUnhealthyApplications *int32 `json:"maxPercentDeltaUnhealthyApplications,omitempty"`
+ // ApplicationDeltaHealthPolicies - Defines the application delta health policy map used to evaluate the health of an application or one of its child entities when upgrading the cluster.
+ ApplicationDeltaHealthPolicies map[string]*ApplicationDeltaHealthPolicy `json:"applicationDeltaHealthPolicies"`
+}
+
+// MarshalJSON is the custom marshaler for ClusterUpgradeDeltaHealthPolicy.
+func (cudhp ClusterUpgradeDeltaHealthPolicy) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if cudhp.MaxPercentDeltaUnhealthyNodes != nil {
+ objectMap["maxPercentDeltaUnhealthyNodes"] = cudhp.MaxPercentDeltaUnhealthyNodes
+ }
+ if cudhp.MaxPercentUpgradeDomainDeltaUnhealthyNodes != nil {
+ objectMap["maxPercentUpgradeDomainDeltaUnhealthyNodes"] = cudhp.MaxPercentUpgradeDomainDeltaUnhealthyNodes
+ }
+ if cudhp.MaxPercentDeltaUnhealthyApplications != nil {
+ objectMap["maxPercentDeltaUnhealthyApplications"] = cudhp.MaxPercentDeltaUnhealthyApplications
+ }
+ if cudhp.ApplicationDeltaHealthPolicies != nil {
+ objectMap["applicationDeltaHealthPolicies"] = cudhp.ApplicationDeltaHealthPolicies
+ }
+ return json.Marshal(objectMap)
+}
+
+// ClusterUpgradePolicy describes the policy used when upgrading the cluster.
+type ClusterUpgradePolicy struct {
+ // ForceRestart - If true, then processes are forcefully restarted during upgrade even when the code version has not changed (the upgrade only changes configuration or data).
+ ForceRestart *bool `json:"forceRestart,omitempty"`
+ // UpgradeReplicaSetCheckTimeout - The maximum amount of time to block processing of an upgrade domain and prevent loss of availability when there are unexpected issues. When this timeout expires, processing of the upgrade domain will proceed regardless of availability loss issues. The timeout is reset at the start of each upgrade domain. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
+ UpgradeReplicaSetCheckTimeout *string `json:"upgradeReplicaSetCheckTimeout,omitempty"`
+ // HealthCheckWaitDuration - The length of time to wait after completing an upgrade domain before performing health checks. The duration can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
+ HealthCheckWaitDuration *string `json:"healthCheckWaitDuration,omitempty"`
+ // HealthCheckStableDuration - The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. The duration can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
+ HealthCheckStableDuration *string `json:"healthCheckStableDuration,omitempty"`
+ // HealthCheckRetryTimeout - The amount of time to retry health evaluation when the application or cluster is unhealthy before the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
+ HealthCheckRetryTimeout *string `json:"healthCheckRetryTimeout,omitempty"`
+ // UpgradeTimeout - The amount of time the overall upgrade has to complete before the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
+ UpgradeTimeout *string `json:"upgradeTimeout,omitempty"`
+ // UpgradeDomainTimeout - The amount of time each upgrade domain has to complete before the upgrade rolls back. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
+ UpgradeDomainTimeout *string `json:"upgradeDomainTimeout,omitempty"`
+ // HealthPolicy - The cluster health policy used when upgrading the cluster.
+ HealthPolicy *ClusterHealthPolicy `json:"healthPolicy,omitempty"`
+ // DeltaHealthPolicy - The cluster delta health policy used when upgrading the cluster.
+ DeltaHealthPolicy *ClusterUpgradeDeltaHealthPolicy `json:"deltaHealthPolicy,omitempty"`
+}
+
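+// Example (illustrative sketch, not part of the generated code): the durations and
+// timeouts above are plain strings in hh:mm:ss or d.hh:mm:ss.ms form, so an upgrade
+// policy can be expressed directly with literals. The values below are placeholders,
+// not recommended settings.
+func exampleUpgradePolicy() ClusterUpgradePolicy {
+ return ClusterUpgradePolicy{
+  ForceRestart:                  to.BoolPtr(false),
+  UpgradeReplicaSetCheckTimeout: to.StringPtr("00:10:00"),
+  HealthCheckWaitDuration:       to.StringPtr("00:05:00"),
+  HealthCheckStableDuration:     to.StringPtr("00:05:00"),
+  HealthCheckRetryTimeout:       to.StringPtr("00:45:00"),
+  UpgradeTimeout:                to.StringPtr("12:00:00"),
+  UpgradeDomainTimeout:          to.StringPtr("02:00:00"),
+  HealthPolicy: &ClusterHealthPolicy{
+   MaxPercentUnhealthyNodes: to.Int32Ptr(0),
+  },
+ }
+}
+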
+// ClusterVersionDetails the detail of the Service Fabric runtime version result
+type ClusterVersionDetails struct {
+ // CodeVersion - The Service Fabric runtime version of the cluster.
+ CodeVersion *string `json:"codeVersion,omitempty"`
+ // SupportExpiryUtc - The date, in UTC, when support for this version expires.
+ SupportExpiryUtc *string `json:"supportExpiryUtc,omitempty"`
+ // Environment - Indicates if this version is for Windows or Linux operating system. Possible values include: 'Windows', 'Linux'
+ Environment Environment `json:"environment,omitempty"`
+}
+
+// DiagnosticsStorageAccountConfig the storage account information for storing Service Fabric diagnostic
+// logs.
+type DiagnosticsStorageAccountConfig struct {
+ // StorageAccountName - The Azure storage account name.
+ StorageAccountName *string `json:"storageAccountName,omitempty"`
+ // ProtectedAccountKeyName - The protected diagnostics storage key name.
+ ProtectedAccountKeyName *string `json:"protectedAccountKeyName,omitempty"`
+ // BlobEndpoint - The blob endpoint of the Azure storage account.
+ BlobEndpoint *string `json:"blobEndpoint,omitempty"`
+ // QueueEndpoint - The queue endpoint of the Azure storage account.
+ QueueEndpoint *string `json:"queueEndpoint,omitempty"`
+ // TableEndpoint - The table endpoint of the Azure storage account.
+ TableEndpoint *string `json:"tableEndpoint,omitempty"`
+}
+
+// EndpointRangeDescription port range details
+type EndpointRangeDescription struct {
+ // StartPort - Starting port of a range of ports
+ StartPort *int32 `json:"startPort,omitempty"`
+ // EndPort - End port of a range of ports
+ EndPort *int32 `json:"endPort,omitempty"`
+}
+
+// ErrorModel the structure of the error.
+type ErrorModel struct {
+ // Error - The error details.
+ Error *ErrorModelError `json:"error,omitempty"`
+}
+
+// ErrorModelError the error details.
+type ErrorModelError struct {
+ // Code - The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - The error message.
+ Message *string `json:"message,omitempty"`
+}
+
+// NamedPartitionSchemeDescription describes the named partition scheme of the service.
+type NamedPartitionSchemeDescription struct {
+ // Count - The number of partitions.
+ Count *int32 `json:"Count,omitempty"`
+ // Names - Array, of the size specified by the ‘Count’ parameter, containing the names of the partitions.
+ Names *[]string `json:"Names,omitempty"`
+ // PartitionScheme - Possible values include: 'PartitionSchemePartitionSchemeDescription', 'PartitionSchemeNamed', 'PartitionSchemeSingleton', 'PartitionSchemeUniformInt64Range'
+ PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"partitionScheme,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for NamedPartitionSchemeDescription.
+func (npsd NamedPartitionSchemeDescription) MarshalJSON() ([]byte, error) {
+ npsd.PartitionScheme = PartitionSchemeNamed
+ objectMap := make(map[string]interface{})
+ if npsd.Count != nil {
+ objectMap["Count"] = npsd.Count
+ }
+ if npsd.Names != nil {
+ objectMap["Names"] = npsd.Names
+ }
+ if npsd.PartitionScheme != "" {
+ objectMap["partitionScheme"] = npsd.PartitionScheme
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsNamedPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for NamedPartitionSchemeDescription.
+func (npsd NamedPartitionSchemeDescription) AsNamedPartitionSchemeDescription() (*NamedPartitionSchemeDescription, bool) {
+ return &npsd, true
+}
+
+// AsSingletonPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for NamedPartitionSchemeDescription.
+func (npsd NamedPartitionSchemeDescription) AsSingletonPartitionSchemeDescription() (*SingletonPartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsUniformInt64RangePartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for NamedPartitionSchemeDescription.
+func (npsd NamedPartitionSchemeDescription) AsUniformInt64RangePartitionSchemeDescription() (*UniformInt64RangePartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for NamedPartitionSchemeDescription.
+func (npsd NamedPartitionSchemeDescription) AsPartitionSchemeDescription() (*PartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsBasicPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for NamedPartitionSchemeDescription.
+func (npsd NamedPartitionSchemeDescription) AsBasicPartitionSchemeDescription() (BasicPartitionSchemeDescription, bool) {
+ return &npsd, true
+}
+
+// NodeTypeDescription describes a node type in the cluster; each node type represents a subset of nodes
+// in the cluster.
+type NodeTypeDescription struct {
+ // Name - The name of the node type.
+ Name *string `json:"name,omitempty"`
+ // PlacementProperties - The placement tags applied to nodes in the node type, which can be used to indicate where certain services (workload) should run.
+ PlacementProperties map[string]*string `json:"placementProperties"`
+ // Capacities - The capacity tags applied to the nodes in the node type; the cluster resource manager uses these tags to understand how much resource a node has.
+ Capacities map[string]*string `json:"capacities"`
+ // ClientConnectionEndpointPort - The TCP cluster management endpoint port.
+ ClientConnectionEndpointPort *int32 `json:"clientConnectionEndpointPort,omitempty"`
+ // HTTPGatewayEndpointPort - The HTTP cluster management endpoint port.
+ HTTPGatewayEndpointPort *int32 `json:"httpGatewayEndpointPort,omitempty"`
+ // DurabilityLevel - The durability level of the node type. Learn about [DurabilityLevel](https://docs.microsoft.com/en-us/azure/service-fabric/service-fabric-cluster-capacity).
+ // - Bronze - No privileges. This is the default.
+ // - Silver - The infrastructure jobs can be paused for a duration of 10 minutes per UD.
+ // - Gold - The infrastructure jobs can be paused for a duration of 2 hours per UD. Gold durability can be enabled only on full-node VM SKUs like D15_V2, G5, etc.
+ // . Possible values include: 'Bronze', 'Silver', 'Gold'
+ DurabilityLevel DurabilityLevel `json:"durabilityLevel,omitempty"`
+ // ApplicationPorts - The range of ports from which the cluster assigns ports to Service Fabric applications.
+ ApplicationPorts *EndpointRangeDescription `json:"applicationPorts,omitempty"`
+ // EphemeralPorts - The range of ephemeral ports that nodes in this node type should be configured with.
+ EphemeralPorts *EndpointRangeDescription `json:"ephemeralPorts,omitempty"`
+ // IsPrimary - The node type on which system services will run. Only one node type should be marked as primary. The primary node type cannot be deleted or changed for existing clusters.
+ IsPrimary *bool `json:"isPrimary,omitempty"`
+ // VMInstanceCount - The number of nodes in the node type. This count should match the capacity property in the corresponding VirtualMachineScaleSet resource.
+ VMInstanceCount *int32 `json:"vmInstanceCount,omitempty"`
+ // ReverseProxyEndpointPort - The endpoint used by reverse proxy.
+ ReverseProxyEndpointPort *int32 `json:"reverseProxyEndpointPort,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for NodeTypeDescription.
+func (ntd NodeTypeDescription) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ntd.Name != nil {
+ objectMap["name"] = ntd.Name
+ }
+ if ntd.PlacementProperties != nil {
+ objectMap["placementProperties"] = ntd.PlacementProperties
+ }
+ if ntd.Capacities != nil {
+ objectMap["capacities"] = ntd.Capacities
+ }
+ if ntd.ClientConnectionEndpointPort != nil {
+ objectMap["clientConnectionEndpointPort"] = ntd.ClientConnectionEndpointPort
+ }
+ if ntd.HTTPGatewayEndpointPort != nil {
+ objectMap["httpGatewayEndpointPort"] = ntd.HTTPGatewayEndpointPort
+ }
+ if ntd.DurabilityLevel != "" {
+ objectMap["durabilityLevel"] = ntd.DurabilityLevel
+ }
+ if ntd.ApplicationPorts != nil {
+ objectMap["applicationPorts"] = ntd.ApplicationPorts
+ }
+ if ntd.EphemeralPorts != nil {
+ objectMap["ephemeralPorts"] = ntd.EphemeralPorts
+ }
+ if ntd.IsPrimary != nil {
+ objectMap["isPrimary"] = ntd.IsPrimary
+ }
+ if ntd.VMInstanceCount != nil {
+ objectMap["vmInstanceCount"] = ntd.VMInstanceCount
+ }
+ if ntd.ReverseProxyEndpointPort != nil {
+ objectMap["reverseProxyEndpointPort"] = ntd.ReverseProxyEndpointPort
+ }
+ return json.Marshal(objectMap)
+}
+
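+// Example (illustrative sketch, not part of the generated code): a primary node
+// type using the conventional Service Fabric management ports and explicit
+// application and ephemeral port ranges. The name, instance count and port ranges
+// are placeholder values.
+func examplePrimaryNodeType() NodeTypeDescription {
+ return NodeTypeDescription{
+  Name:                         to.StringPtr("nt1"),
+  IsPrimary:                    to.BoolPtr(true),
+  VMInstanceCount:              to.Int32Ptr(5),
+  DurabilityLevel:              Silver,
+  ClientConnectionEndpointPort: to.Int32Ptr(19000),
+  HTTPGatewayEndpointPort:      to.Int32Ptr(19080),
+  ApplicationPorts:             &EndpointRangeDescription{StartPort: to.Int32Ptr(20000), EndPort: to.Int32Ptr(30000)},
+  EphemeralPorts:               &EndpointRangeDescription{StartPort: to.Int32Ptr(49152), EndPort: to.Int32Ptr(65534)},
+ }
+}
+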
+// OperationListResult describes the result of the request to list Service Fabric resource provider
+// operations.
+type OperationListResult struct {
+ autorest.Response `json:"-"`
+ // Value - List of operations supported by the Service Fabric resource provider.
+ Value *[]OperationResult `json:"value,omitempty"`
+ // NextLink - READ-ONLY; URL to get the next set of operation list results if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// OperationListResultIterator provides access to a complete listing of OperationResult values.
+type OperationListResultIterator struct {
+ i int
+ page OperationListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *OperationListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *OperationListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter OperationListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter OperationListResultIterator) Response() OperationListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter OperationListResultIterator) Value() OperationResult {
+ if !iter.page.NotDone() {
+ return OperationResult{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the OperationListResultIterator type.
+func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
+ return OperationListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (olr OperationListResult) IsEmpty() bool {
+ return olr.Value == nil || len(*olr.Value) == 0
+}
+
+// operationListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (olr OperationListResult) operationListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if olr.NextLink == nil || len(to.String(olr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(olr.NextLink)))
+}
+
+// OperationListResultPage contains a page of OperationResult values.
+type OperationListResultPage struct {
+ fn func(context.Context, OperationListResult) (OperationListResult, error)
+ olr OperationListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *OperationListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.olr)
+ if err != nil {
+ return err
+ }
+ page.olr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *OperationListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page OperationListResultPage) NotDone() bool {
+ return !page.olr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page OperationListResultPage) Response() OperationListResult {
+ return page.olr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page OperationListResultPage) Values() []OperationResult {
+ if page.olr.IsEmpty() {
+ return nil
+ }
+ return *page.olr.Value
+}
+
+// Creates a new instance of the OperationListResultPage type.
+func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
+ return OperationListResultPage{fn: getNextPage}
+}
+
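+// Example (illustrative sketch, not part of the generated code): draining a paged
+// operations listing with the iterator defined above, however it was obtained.
+// NotDone, Value and NextWithContext are the whole consumption surface.
+func collectOperations(ctx context.Context, iter OperationListResultIterator) ([]OperationResult, error) {
+ var ops []OperationResult
+ for iter.NotDone() {
+  ops = append(ops, iter.Value())
+  if err := iter.NextWithContext(ctx); err != nil {
+   return nil, err
+  }
+ }
+ return ops, nil
+}
+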
+// OperationResult available operation list result
+type OperationResult struct {
+ // Name - The name of the operation.
+ Name *string `json:"name,omitempty"`
+ // Display - The object that represents the operation.
+ Display *AvailableOperationDisplay `json:"display,omitempty"`
+ // Origin - The origin of the result.
+ Origin *string `json:"origin,omitempty"`
+ // NextLink - The URL to use for getting the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// BasicPartitionSchemeDescription describes how the service is partitioned.
+type BasicPartitionSchemeDescription interface {
+ AsNamedPartitionSchemeDescription() (*NamedPartitionSchemeDescription, bool)
+ AsSingletonPartitionSchemeDescription() (*SingletonPartitionSchemeDescription, bool)
+ AsUniformInt64RangePartitionSchemeDescription() (*UniformInt64RangePartitionSchemeDescription, bool)
+ AsPartitionSchemeDescription() (*PartitionSchemeDescription, bool)
+}
+
+// PartitionSchemeDescription describes how the service is partitioned.
+type PartitionSchemeDescription struct {
+ // PartitionScheme - Possible values include: 'PartitionSchemePartitionSchemeDescription', 'PartitionSchemeNamed', 'PartitionSchemeSingleton', 'PartitionSchemeUniformInt64Range'
+ PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"partitionScheme,omitempty"`
+}
+
+func unmarshalBasicPartitionSchemeDescription(body []byte) (BasicPartitionSchemeDescription, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["partitionScheme"] {
+ case string(PartitionSchemeNamed):
+ var npsd NamedPartitionSchemeDescription
+ err := json.Unmarshal(body, &npsd)
+ return npsd, err
+ case string(PartitionSchemeSingleton):
+ var spsd SingletonPartitionSchemeDescription
+ err := json.Unmarshal(body, &spsd)
+ return spsd, err
+ case string(PartitionSchemeUniformInt64Range):
+ var ui6rpsd UniformInt64RangePartitionSchemeDescription
+ err := json.Unmarshal(body, &ui6rpsd)
+ return ui6rpsd, err
+ default:
+ var psd PartitionSchemeDescription
+ err := json.Unmarshal(body, &psd)
+ return psd, err
+ }
+}
+func unmarshalBasicPartitionSchemeDescriptionArray(body []byte) ([]BasicPartitionSchemeDescription, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ psdArray := make([]BasicPartitionSchemeDescription, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ psd, err := unmarshalBasicPartitionSchemeDescription(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ psdArray[index] = psd
+ }
+ return psdArray, nil
+}
+
+// MarshalJSON is the custom marshaler for PartitionSchemeDescription.
+func (psd PartitionSchemeDescription) MarshalJSON() ([]byte, error) {
+ psd.PartitionScheme = PartitionSchemePartitionSchemeDescription
+ objectMap := make(map[string]interface{})
+ if psd.PartitionScheme != "" {
+ objectMap["partitionScheme"] = psd.PartitionScheme
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsNamedPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for PartitionSchemeDescription.
+func (psd PartitionSchemeDescription) AsNamedPartitionSchemeDescription() (*NamedPartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsSingletonPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for PartitionSchemeDescription.
+func (psd PartitionSchemeDescription) AsSingletonPartitionSchemeDescription() (*SingletonPartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsUniformInt64RangePartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for PartitionSchemeDescription.
+func (psd PartitionSchemeDescription) AsUniformInt64RangePartitionSchemeDescription() (*UniformInt64RangePartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for PartitionSchemeDescription.
+func (psd PartitionSchemeDescription) AsPartitionSchemeDescription() (*PartitionSchemeDescription, bool) {
+ return &psd, true
+}
+
+// AsBasicPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for PartitionSchemeDescription.
+func (psd PartitionSchemeDescription) AsBasicPartitionSchemeDescription() (BasicPartitionSchemeDescription, bool) {
+ return &psd, true
+}
+
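+// Example (illustrative sketch, not part of the generated code): callers recover a
+// concrete partition scheme from the BasicPartitionSchemeDescription interface via
+// the As* accessors above; only the accessor matching the discriminator returns a
+// non-nil value and true.
+func namedPartitionCount(scheme BasicPartitionSchemeDescription) (int32, bool) {
+ npsd, ok := scheme.AsNamedPartitionSchemeDescription()
+ if !ok || npsd.Count == nil {
+  return 0, false
+ }
+ return *npsd.Count, true
+}
+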
+// ProxyResource the resource model definition for proxy-only resource.
+type ProxyResource struct {
+ // ID - READ-ONLY; Azure resource identifier.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Azure resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Azure resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Azure resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ProxyResource.
+func (pr ProxyResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if pr.Location != nil {
+ objectMap["location"] = pr.Location
+ }
+ if pr.Tags != nil {
+ objectMap["tags"] = pr.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// Resource the resource model definition.
+type Resource struct {
+ // ID - READ-ONLY; Azure resource identifier.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Azure resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Azure resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Azure resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Resource.
+func (r Resource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if r.Location != nil {
+ objectMap["location"] = r.Location
+ }
+ if r.Tags != nil {
+ objectMap["tags"] = r.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// ServerCertificateCommonName describes the server certificate details using common name.
+type ServerCertificateCommonName struct {
+ // CertificateCommonName - The common name of the server certificate.
+ CertificateCommonName *string `json:"certificateCommonName,omitempty"`
+ // CertificateIssuerThumbprint - The issuer thumbprint of the server certificate.
+ CertificateIssuerThumbprint *string `json:"certificateIssuerThumbprint,omitempty"`
+}
+
+// ServerCertificateCommonNames describes a list of server certificates referenced by common name that are
+// used to secure the cluster.
+type ServerCertificateCommonNames struct {
+ // CommonNames - The list of server certificates referenced by common name that are used to secure the cluster.
+ CommonNames *[]ServerCertificateCommonName `json:"commonNames,omitempty"`
+ // X509StoreName - The local certificate store location. Possible values include: 'X509StoreName1AddressBook', 'X509StoreName1AuthRoot', 'X509StoreName1CertificateAuthority', 'X509StoreName1Disallowed', 'X509StoreName1My', 'X509StoreName1Root', 'X509StoreName1TrustedPeople', 'X509StoreName1TrustedPublisher'
+ X509StoreName X509StoreName1 `json:"x509StoreName,omitempty"`
+}
+
+// ServiceCorrelationDescription creates a particular correlation between services.
+type ServiceCorrelationDescription struct {
+ // Scheme - The ServiceCorrelationScheme which describes the relationship between this service and the service specified via ServiceName. Possible values include: 'ServiceCorrelationSchemeInvalid', 'ServiceCorrelationSchemeAffinity', 'ServiceCorrelationSchemeAlignedAffinity', 'ServiceCorrelationSchemeNonAlignedAffinity'
+ Scheme ServiceCorrelationScheme `json:"scheme,omitempty"`
+ // ServiceName - The name of the service that the correlation relationship is established with.
+ ServiceName *string `json:"serviceName,omitempty"`
+}
+
+// ServiceLoadMetricDescription specifies a metric to load balance a service during runtime.
+type ServiceLoadMetricDescription struct {
+ // Name - The name of the metric. If the service chooses to report load during runtime, the load metric name should match the name that is specified in Name exactly. Note that metric names are case sensitive.
+ Name *string `json:"name,omitempty"`
+ // Weight - The service load metric relative weight, compared to other metrics configured for this service, as a number. Possible values include: 'ServiceLoadMetricWeightZero', 'ServiceLoadMetricWeightLow', 'ServiceLoadMetricWeightMedium', 'ServiceLoadMetricWeightHigh'
+ Weight ServiceLoadMetricWeight `json:"weight,omitempty"`
+ // PrimaryDefaultLoad - Used only for Stateful services. The default amount of load, as a number, that this service creates for this metric when it is a Primary replica.
+ PrimaryDefaultLoad *int32 `json:"primaryDefaultLoad,omitempty"`
+ // SecondaryDefaultLoad - Used only for Stateful services. The default amount of load, as a number, that this service creates for this metric when it is a Secondary replica.
+ SecondaryDefaultLoad *int32 `json:"secondaryDefaultLoad,omitempty"`
+ // DefaultLoad - Used only for Stateless services. The default amount of load, as a number, that this service creates for this metric.
+ DefaultLoad *int32 `json:"defaultLoad,omitempty"`
+}
+
+// BasicServicePlacementPolicyDescription describes the policy to be used for placement of a Service Fabric service.
+type BasicServicePlacementPolicyDescription interface {
+ AsServicePlacementPolicyDescription() (*ServicePlacementPolicyDescription, bool)
+}
+
+// ServicePlacementPolicyDescription describes the policy to be used for placement of a Service Fabric service.
+type ServicePlacementPolicyDescription struct {
+ // Type - Possible values include: 'TypeServicePlacementPolicyDescription'
+ Type Type `json:"Type,omitempty"`
+}
+
+func unmarshalBasicServicePlacementPolicyDescription(body []byte) (BasicServicePlacementPolicyDescription, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["Type"] {
+ default:
+ var sppd ServicePlacementPolicyDescription
+ err := json.Unmarshal(body, &sppd)
+ return sppd, err
+ }
+}
+func unmarshalBasicServicePlacementPolicyDescriptionArray(body []byte) ([]BasicServicePlacementPolicyDescription, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ sppdArray := make([]BasicServicePlacementPolicyDescription, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ sppd, err := unmarshalBasicServicePlacementPolicyDescription(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ sppdArray[index] = sppd
+ }
+ return sppdArray, nil
+}
+
+// MarshalJSON is the custom marshaler for ServicePlacementPolicyDescription.
+func (sppd ServicePlacementPolicyDescription) MarshalJSON() ([]byte, error) {
+ sppd.Type = TypeServicePlacementPolicyDescription
+ objectMap := make(map[string]interface{})
+ if sppd.Type != "" {
+ objectMap["Type"] = sppd.Type
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsServicePlacementPolicyDescription is the BasicServicePlacementPolicyDescription implementation for ServicePlacementPolicyDescription.
+func (sppd ServicePlacementPolicyDescription) AsServicePlacementPolicyDescription() (*ServicePlacementPolicyDescription, bool) {
+ return &sppd, true
+}
+
+// AsBasicServicePlacementPolicyDescription is the BasicServicePlacementPolicyDescription implementation for ServicePlacementPolicyDescription.
+func (sppd ServicePlacementPolicyDescription) AsBasicServicePlacementPolicyDescription() (BasicServicePlacementPolicyDescription, bool) {
+ return &sppd, true
+}
+
+// ServiceResource the service resource.
+type ServiceResource struct {
+ autorest.Response `json:"-"`
+ // BasicServiceResourceProperties - The service resource properties.
+ BasicServiceResourceProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource identifier.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Azure resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Azure resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Azure resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ServiceResource.
+func (sr ServiceResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ objectMap["properties"] = sr.BasicServiceResourceProperties
+ if sr.Location != nil {
+ objectMap["location"] = sr.Location
+ }
+ if sr.Tags != nil {
+ objectMap["tags"] = sr.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ServiceResource struct.
+func (sr *ServiceResource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ basicServiceResourceProperties, err := unmarshalBasicServiceResourceProperties(*v)
+ if err != nil {
+ return err
+ }
+ sr.BasicServiceResourceProperties = basicServiceResourceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ sr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ sr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sr.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ sr.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ sr.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ sr.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
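+
+// exampleServiceKindRoundTrip is a minimal usage sketch, not generated code:
+// it illustrates how the custom (un)marshalers above cooperate. Marshaling a
+// ServiceResource whose properties are a concrete StatefulServiceProperties
+// stamps the serviceKind discriminator into "properties", and unmarshaling
+// dispatches on that discriminator so the concrete type can be recovered via
+// AsStatefulServiceProperties. The field values below are placeholders.
+func exampleServiceKindRoundTrip() (*StatefulServiceProperties, bool) {
+ serviceTypeName := "ExampleServiceType" // placeholder value
+ targetReplicas := int32(3)              // placeholder value
+ src := ServiceResource{
+ BasicServiceResourceProperties: StatefulServiceProperties{
+ ServiceTypeName:      &serviceTypeName,
+ TargetReplicaSetSize: &targetReplicas,
+ },
+ }
+ // StatefulServiceProperties.MarshalJSON sets the serviceKind value.
+ body, err := json.Marshal(src)
+ if err != nil {
+ return nil, false
+ }
+ // ServiceResource.UnmarshalJSON dispatches via unmarshalBasicServiceResourceProperties.
+ var dst ServiceResource
+ if err := json.Unmarshal(body, &dst); err != nil {
+ return nil, false
+ }
+ return dst.BasicServiceResourceProperties.AsStatefulServiceProperties()
+}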
+
+// ServiceResourceList the list of service resources.
+type ServiceResourceList struct {
+ autorest.Response `json:"-"`
+ Value *[]ServiceResource `json:"value,omitempty"`
+ // NextLink - READ-ONLY; URL to get the next set of service list results if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// BasicServiceResourceProperties the service resource properties.
+type BasicServiceResourceProperties interface {
+ AsStatefulServiceProperties() (*StatefulServiceProperties, bool)
+ AsStatelessServiceProperties() (*StatelessServiceProperties, bool)
+ AsServiceResourceProperties() (*ServiceResourceProperties, bool)
+}
+
+// ServiceResourceProperties the service resource properties.
+type ServiceResourceProperties struct {
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // ServiceTypeName - The name of the service type
+ ServiceTypeName *string `json:"serviceTypeName,omitempty"`
+ // PartitionDescription - Describes how the service is partitioned.
+ PartitionDescription BasicPartitionSchemeDescription `json:"partitionDescription,omitempty"`
+ // ServicePackageActivationMode - The activation mode of the service package. Possible values include: 'SharedProcess', 'ExclusiveProcess'
+ ServicePackageActivationMode ArmServicePackageActivationMode `json:"servicePackageActivationMode,omitempty"`
+ // ServiceKind - Possible values include: 'ServiceKindServiceResourceProperties', 'ServiceKindStateful1', 'ServiceKindStateless1'
+ ServiceKind ServiceKindBasicServiceResourceProperties `json:"serviceKind,omitempty"`
+ // PlacementConstraints - The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue, specify the following: "NodeColor == blue".
+ PlacementConstraints *string `json:"placementConstraints,omitempty"`
+ // CorrelationScheme - A list that describes the correlation of the service with other services.
+ CorrelationScheme *[]ServiceCorrelationDescription `json:"correlationScheme,omitempty"`
+ // ServiceLoadMetrics - The service load metrics are given as an array of ServiceLoadMetricDescription objects.
+ ServiceLoadMetrics *[]ServiceLoadMetricDescription `json:"serviceLoadMetrics,omitempty"`
+ // ServicePlacementPolicies - A list that describes the placement policies for the service.
+ ServicePlacementPolicies *[]BasicServicePlacementPolicyDescription `json:"servicePlacementPolicies,omitempty"`
+ // DefaultMoveCost - Specifies the move cost for the service. Possible values include: 'Zero', 'Low', 'Medium', 'High'
+ DefaultMoveCost MoveCost `json:"defaultMoveCost,omitempty"`
+}
+
+func unmarshalBasicServiceResourceProperties(body []byte) (BasicServiceResourceProperties, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["serviceKind"] {
+ case string(ServiceKindStateful1):
+ var ssp StatefulServiceProperties
+ err := json.Unmarshal(body, &ssp)
+ return ssp, err
+ case string(ServiceKindStateless1):
+ var ssp StatelessServiceProperties
+ err := json.Unmarshal(body, &ssp)
+ return ssp, err
+ default:
+ var srp ServiceResourceProperties
+ err := json.Unmarshal(body, &srp)
+ return srp, err
+ }
+}
+func unmarshalBasicServiceResourcePropertiesArray(body []byte) ([]BasicServiceResourceProperties, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ srpArray := make([]BasicServiceResourceProperties, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ srp, err := unmarshalBasicServiceResourceProperties(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ srpArray[index] = srp
+ }
+ return srpArray, nil
+}
+
+// MarshalJSON is the custom marshaler for ServiceResourceProperties.
+func (srp ServiceResourceProperties) MarshalJSON() ([]byte, error) {
+ srp.ServiceKind = ServiceKindServiceResourceProperties
+ objectMap := make(map[string]interface{})
+ if srp.ServiceTypeName != nil {
+ objectMap["serviceTypeName"] = srp.ServiceTypeName
+ }
+ objectMap["partitionDescription"] = srp.PartitionDescription
+ if srp.ServicePackageActivationMode != "" {
+ objectMap["servicePackageActivationMode"] = srp.ServicePackageActivationMode
+ }
+ if srp.ServiceKind != "" {
+ objectMap["serviceKind"] = srp.ServiceKind
+ }
+ if srp.PlacementConstraints != nil {
+ objectMap["placementConstraints"] = srp.PlacementConstraints
+ }
+ if srp.CorrelationScheme != nil {
+ objectMap["correlationScheme"] = srp.CorrelationScheme
+ }
+ if srp.ServiceLoadMetrics != nil {
+ objectMap["serviceLoadMetrics"] = srp.ServiceLoadMetrics
+ }
+ if srp.ServicePlacementPolicies != nil {
+ objectMap["servicePlacementPolicies"] = srp.ServicePlacementPolicies
+ }
+ if srp.DefaultMoveCost != "" {
+ objectMap["defaultMoveCost"] = srp.DefaultMoveCost
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsStatefulServiceProperties is the BasicServiceResourceProperties implementation for ServiceResourceProperties.
+func (srp ServiceResourceProperties) AsStatefulServiceProperties() (*StatefulServiceProperties, bool) {
+ return nil, false
+}
+
+// AsStatelessServiceProperties is the BasicServiceResourceProperties implementation for ServiceResourceProperties.
+func (srp ServiceResourceProperties) AsStatelessServiceProperties() (*StatelessServiceProperties, bool) {
+ return nil, false
+}
+
+// AsServiceResourceProperties is the BasicServiceResourceProperties implementation for ServiceResourceProperties.
+func (srp ServiceResourceProperties) AsServiceResourceProperties() (*ServiceResourceProperties, bool) {
+ return &srp, true
+}
+
+// AsBasicServiceResourceProperties is the BasicServiceResourceProperties implementation for ServiceResourceProperties.
+func (srp ServiceResourceProperties) AsBasicServiceResourceProperties() (BasicServiceResourceProperties, bool) {
+ return &srp, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for ServiceResourceProperties struct.
+func (srp *ServiceResourceProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "provisioningState":
+ if v != nil {
+ var provisioningState string
+ err = json.Unmarshal(*v, &provisioningState)
+ if err != nil {
+ return err
+ }
+ srp.ProvisioningState = &provisioningState
+ }
+ case "serviceTypeName":
+ if v != nil {
+ var serviceTypeName string
+ err = json.Unmarshal(*v, &serviceTypeName)
+ if err != nil {
+ return err
+ }
+ srp.ServiceTypeName = &serviceTypeName
+ }
+ case "partitionDescription":
+ if v != nil {
+ partitionDescription, err := unmarshalBasicPartitionSchemeDescription(*v)
+ if err != nil {
+ return err
+ }
+ srp.PartitionDescription = partitionDescription
+ }
+ case "servicePackageActivationMode":
+ if v != nil {
+ var servicePackageActivationMode ArmServicePackageActivationMode
+ err = json.Unmarshal(*v, &servicePackageActivationMode)
+ if err != nil {
+ return err
+ }
+ srp.ServicePackageActivationMode = servicePackageActivationMode
+ }
+ case "serviceKind":
+ if v != nil {
+ var serviceKind ServiceKindBasicServiceResourceProperties
+ err = json.Unmarshal(*v, &serviceKind)
+ if err != nil {
+ return err
+ }
+ srp.ServiceKind = serviceKind
+ }
+ case "placementConstraints":
+ if v != nil {
+ var placementConstraints string
+ err = json.Unmarshal(*v, &placementConstraints)
+ if err != nil {
+ return err
+ }
+ srp.PlacementConstraints = &placementConstraints
+ }
+ case "correlationScheme":
+ if v != nil {
+ var correlationScheme []ServiceCorrelationDescription
+ err = json.Unmarshal(*v, &correlationScheme)
+ if err != nil {
+ return err
+ }
+ srp.CorrelationScheme = &correlationScheme
+ }
+ case "serviceLoadMetrics":
+ if v != nil {
+ var serviceLoadMetrics []ServiceLoadMetricDescription
+ err = json.Unmarshal(*v, &serviceLoadMetrics)
+ if err != nil {
+ return err
+ }
+ srp.ServiceLoadMetrics = &serviceLoadMetrics
+ }
+ case "servicePlacementPolicies":
+ if v != nil {
+ servicePlacementPolicies, err := unmarshalBasicServicePlacementPolicyDescriptionArray(*v)
+ if err != nil {
+ return err
+ }
+ srp.ServicePlacementPolicies = &servicePlacementPolicies
+ }
+ case "defaultMoveCost":
+ if v != nil {
+ var defaultMoveCost MoveCost
+ err = json.Unmarshal(*v, &defaultMoveCost)
+ if err != nil {
+ return err
+ }
+ srp.DefaultMoveCost = defaultMoveCost
+ }
+ }
+ }
+
+ return nil
+}
+
+// ServiceResourcePropertiesBase the common service resource properties.
+type ServiceResourcePropertiesBase struct {
+ // PlacementConstraints - The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue, specify the following: "NodeColor == blue".
+ PlacementConstraints *string `json:"placementConstraints,omitempty"`
+ // CorrelationScheme - A list that describes the correlation of the service with other services.
+ CorrelationScheme *[]ServiceCorrelationDescription `json:"correlationScheme,omitempty"`
+ // ServiceLoadMetrics - The service load metrics are given as an array of ServiceLoadMetricDescription objects.
+ ServiceLoadMetrics *[]ServiceLoadMetricDescription `json:"serviceLoadMetrics,omitempty"`
+ // ServicePlacementPolicies - A list that describes the placement policies for the service.
+ ServicePlacementPolicies *[]BasicServicePlacementPolicyDescription `json:"servicePlacementPolicies,omitempty"`
+ // DefaultMoveCost - Specifies the move cost for the service. Possible values include: 'Zero', 'Low', 'Medium', 'High'
+ DefaultMoveCost MoveCost `json:"defaultMoveCost,omitempty"`
+}
+
+// UnmarshalJSON is the custom unmarshaler for ServiceResourcePropertiesBase struct.
+func (srpb *ServiceResourcePropertiesBase) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "placementConstraints":
+ if v != nil {
+ var placementConstraints string
+ err = json.Unmarshal(*v, &placementConstraints)
+ if err != nil {
+ return err
+ }
+ srpb.PlacementConstraints = &placementConstraints
+ }
+ case "correlationScheme":
+ if v != nil {
+ var correlationScheme []ServiceCorrelationDescription
+ err = json.Unmarshal(*v, &correlationScheme)
+ if err != nil {
+ return err
+ }
+ srpb.CorrelationScheme = &correlationScheme
+ }
+ case "serviceLoadMetrics":
+ if v != nil {
+ var serviceLoadMetrics []ServiceLoadMetricDescription
+ err = json.Unmarshal(*v, &serviceLoadMetrics)
+ if err != nil {
+ return err
+ }
+ srpb.ServiceLoadMetrics = &serviceLoadMetrics
+ }
+ case "servicePlacementPolicies":
+ if v != nil {
+ servicePlacementPolicies, err := unmarshalBasicServicePlacementPolicyDescriptionArray(*v)
+ if err != nil {
+ return err
+ }
+ srpb.ServicePlacementPolicies = &servicePlacementPolicies
+ }
+ case "defaultMoveCost":
+ if v != nil {
+ var defaultMoveCost MoveCost
+ err = json.Unmarshal(*v, &defaultMoveCost)
+ if err != nil {
+ return err
+ }
+ srpb.DefaultMoveCost = defaultMoveCost
+ }
+ }
+ }
+
+ return nil
+}
+
+// ServiceResourceUpdate the service resource for patch operations.
+type ServiceResourceUpdate struct {
+ // BasicServiceResourceUpdateProperties - The service resource properties for patch operations.
+ BasicServiceResourceUpdateProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Azure resource identifier.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Azure resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Azure resource type.
+ Type *string `json:"type,omitempty"`
+ // Location - Azure resource location.
+ Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ServiceResourceUpdate.
+func (sru ServiceResourceUpdate) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ objectMap["properties"] = sru.BasicServiceResourceUpdateProperties
+ if sru.Location != nil {
+ objectMap["location"] = sru.Location
+ }
+ if sru.Tags != nil {
+ objectMap["tags"] = sru.Tags
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ServiceResourceUpdate struct.
+func (sru *ServiceResourceUpdate) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ basicServiceResourceUpdateProperties, err := unmarshalBasicServiceResourceUpdateProperties(*v)
+ if err != nil {
+ return err
+ }
+ sru.BasicServiceResourceUpdateProperties = basicServiceResourceUpdateProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ sru.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ sru.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sru.Type = &typeVar
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ sru.Location = &location
+ }
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ sru.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ sru.Etag = &etag
+ }
+ }
+ }
+
+ return nil
+}
+
+// BasicServiceResourceUpdateProperties the service resource properties for patch operations.
+type BasicServiceResourceUpdateProperties interface {
+ AsStatefulServiceUpdateProperties() (*StatefulServiceUpdateProperties, bool)
+ AsStatelessServiceUpdateProperties() (*StatelessServiceUpdateProperties, bool)
+ AsServiceResourceUpdateProperties() (*ServiceResourceUpdateProperties, bool)
+}
+
+// ServiceResourceUpdateProperties the service resource properties for patch operations.
+type ServiceResourceUpdateProperties struct {
+ // ServiceKind - Possible values include: 'ServiceKindBasicServiceResourceUpdatePropertiesServiceKindServiceResourceUpdateProperties', 'ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateful', 'ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateless'
+ ServiceKind ServiceKindBasicServiceResourceUpdateProperties `json:"serviceKind,omitempty"`
+ // PlacementConstraints - The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue, specify the following: "NodeColor == blue".
+ PlacementConstraints *string `json:"placementConstraints,omitempty"`
+ // CorrelationScheme - A list that describes the correlation of the service with other services.
+ CorrelationScheme *[]ServiceCorrelationDescription `json:"correlationScheme,omitempty"`
+ // ServiceLoadMetrics - The service load metrics are given as an array of ServiceLoadMetricDescription objects.
+ ServiceLoadMetrics *[]ServiceLoadMetricDescription `json:"serviceLoadMetrics,omitempty"`
+ // ServicePlacementPolicies - A list that describes the placement policies for the service.
+ ServicePlacementPolicies *[]BasicServicePlacementPolicyDescription `json:"servicePlacementPolicies,omitempty"`
+ // DefaultMoveCost - Specifies the move cost for the service. Possible values include: 'Zero', 'Low', 'Medium', 'High'
+ DefaultMoveCost MoveCost `json:"defaultMoveCost,omitempty"`
+}
+
+func unmarshalBasicServiceResourceUpdateProperties(body []byte) (BasicServiceResourceUpdateProperties, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["serviceKind"] {
+ case string(ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateful):
+ var ssup StatefulServiceUpdateProperties
+ err := json.Unmarshal(body, &ssup)
+ return ssup, err
+ case string(ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateless):
+ var ssup StatelessServiceUpdateProperties
+ err := json.Unmarshal(body, &ssup)
+ return ssup, err
+ default:
+ var srup ServiceResourceUpdateProperties
+ err := json.Unmarshal(body, &srup)
+ return srup, err
+ }
+}
+func unmarshalBasicServiceResourceUpdatePropertiesArray(body []byte) ([]BasicServiceResourceUpdateProperties, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ srupArray := make([]BasicServiceResourceUpdateProperties, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ srup, err := unmarshalBasicServiceResourceUpdateProperties(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ srupArray[index] = srup
+ }
+ return srupArray, nil
+}
+
+// MarshalJSON is the custom marshaler for ServiceResourceUpdateProperties.
+func (srup ServiceResourceUpdateProperties) MarshalJSON() ([]byte, error) {
+ srup.ServiceKind = ServiceKindBasicServiceResourceUpdatePropertiesServiceKindServiceResourceUpdateProperties
+ objectMap := make(map[string]interface{})
+ if srup.ServiceKind != "" {
+ objectMap["serviceKind"] = srup.ServiceKind
+ }
+ if srup.PlacementConstraints != nil {
+ objectMap["placementConstraints"] = srup.PlacementConstraints
+ }
+ if srup.CorrelationScheme != nil {
+ objectMap["correlationScheme"] = srup.CorrelationScheme
+ }
+ if srup.ServiceLoadMetrics != nil {
+ objectMap["serviceLoadMetrics"] = srup.ServiceLoadMetrics
+ }
+ if srup.ServicePlacementPolicies != nil {
+ objectMap["servicePlacementPolicies"] = srup.ServicePlacementPolicies
+ }
+ if srup.DefaultMoveCost != "" {
+ objectMap["defaultMoveCost"] = srup.DefaultMoveCost
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsStatefulServiceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for ServiceResourceUpdateProperties.
+func (srup ServiceResourceUpdateProperties) AsStatefulServiceUpdateProperties() (*StatefulServiceUpdateProperties, bool) {
+ return nil, false
+}
+
+// AsStatelessServiceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for ServiceResourceUpdateProperties.
+func (srup ServiceResourceUpdateProperties) AsStatelessServiceUpdateProperties() (*StatelessServiceUpdateProperties, bool) {
+ return nil, false
+}
+
+// AsServiceResourceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for ServiceResourceUpdateProperties.
+func (srup ServiceResourceUpdateProperties) AsServiceResourceUpdateProperties() (*ServiceResourceUpdateProperties, bool) {
+ return &srup, true
+}
+
+// AsBasicServiceResourceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for ServiceResourceUpdateProperties.
+func (srup ServiceResourceUpdateProperties) AsBasicServiceResourceUpdateProperties() (BasicServiceResourceUpdateProperties, bool) {
+ return &srup, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for ServiceResourceUpdateProperties struct.
+func (srup *ServiceResourceUpdateProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "serviceKind":
+ if v != nil {
+ var serviceKind ServiceKindBasicServiceResourceUpdateProperties
+ err = json.Unmarshal(*v, &serviceKind)
+ if err != nil {
+ return err
+ }
+ srup.ServiceKind = serviceKind
+ }
+ case "placementConstraints":
+ if v != nil {
+ var placementConstraints string
+ err = json.Unmarshal(*v, &placementConstraints)
+ if err != nil {
+ return err
+ }
+ srup.PlacementConstraints = &placementConstraints
+ }
+ case "correlationScheme":
+ if v != nil {
+ var correlationScheme []ServiceCorrelationDescription
+ err = json.Unmarshal(*v, &correlationScheme)
+ if err != nil {
+ return err
+ }
+ srup.CorrelationScheme = &correlationScheme
+ }
+ case "serviceLoadMetrics":
+ if v != nil {
+ var serviceLoadMetrics []ServiceLoadMetricDescription
+ err = json.Unmarshal(*v, &serviceLoadMetrics)
+ if err != nil {
+ return err
+ }
+ srup.ServiceLoadMetrics = &serviceLoadMetrics
+ }
+ case "servicePlacementPolicies":
+ if v != nil {
+ servicePlacementPolicies, err := unmarshalBasicServicePlacementPolicyDescriptionArray(*v)
+ if err != nil {
+ return err
+ }
+ srup.ServicePlacementPolicies = &servicePlacementPolicies
+ }
+ case "defaultMoveCost":
+ if v != nil {
+ var defaultMoveCost MoveCost
+ err = json.Unmarshal(*v, &defaultMoveCost)
+ if err != nil {
+ return err
+ }
+ srup.DefaultMoveCost = defaultMoveCost
+ }
+ }
+ }
+
+ return nil
+}
+
+// ServicesCreateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type ServicesCreateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ServicesCreateFuture) Result(client ServicesClient) (sr ServiceResource, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesCreateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ServicesCreateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if sr.Response.Response, err = future.GetResult(sender); err == nil && sr.Response.Response.StatusCode != http.StatusNoContent {
+ sr, err = client.CreateResponder(sr.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesCreateFuture", "Result", sr.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
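+
+// exampleWaitForServiceCreate is a minimal usage sketch, not generated code:
+// it shows the typical way callers consume ServicesCreateFuture, blocking on
+// the embedded azure.Future until the long-running operation finishes and
+// then calling Result to obtain the ServiceResource. It assumes the future
+// came from the Services client's create call and that client.Client is the
+// autorest.Client promoted from the package BaseClient.
+func exampleWaitForServiceCreate(ctx context.Context, client ServicesClient, future ServicesCreateFuture) (ServiceResource, error) {
+ // Poll until the operation reaches a terminal state (or ctx is cancelled).
+ if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+ return ServiceResource{}, err
+ }
+ // Result re-checks completion and unmarshals the final resource.
+ return future.Result(client)
+}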
+
+// ServicesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type ServicesDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ServicesDeleteFuture) Result(client ServicesClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ServicesDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
+// ServicesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
+type ServicesUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ServicesUpdateFuture) Result(client ServicesClient) (sr ServiceResource, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ServicesUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if sr.Response.Response, err = future.GetResult(sender); err == nil && sr.Response.Response.StatusCode != http.StatusNoContent {
+ sr, err = client.UpdateResponder(sr.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesUpdateFuture", "Result", sr.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// ServiceTypeDeltaHealthPolicy represents the delta health policy used to evaluate the health of services
+// belonging to a service type when upgrading the cluster.
+type ServiceTypeDeltaHealthPolicy struct {
+ // MaxPercentDeltaUnhealthyServices - The maximum allowed percentage of health degradation for services during cluster upgrades.
+ // The delta is measured between the state of the services at the beginning of upgrade and the state of the services at the time of the health evaluation.
+ // The check is performed after every upgrade domain upgrade completion to make sure the global state of the cluster is within tolerated limits.
+ MaxPercentDeltaUnhealthyServices *int32 `json:"maxPercentDeltaUnhealthyServices,omitempty"`
+}
+
+// ServiceTypeHealthPolicy represents the health policy used to evaluate the health of services belonging
+// to a service type.
+type ServiceTypeHealthPolicy struct {
+ // MaxPercentUnhealthyServices - The maximum percentage of services allowed to be unhealthy before your application is considered in error.
+ MaxPercentUnhealthyServices *int32 `json:"maxPercentUnhealthyServices,omitempty"`
+}
+
+// SettingsParameterDescription describes a parameter in fabric settings of the cluster.
+type SettingsParameterDescription struct {
+ // Name - The parameter name of fabric setting.
+ Name *string `json:"name,omitempty"`
+ // Value - The parameter value of fabric setting.
+ Value *string `json:"value,omitempty"`
+}
+
+// SettingsSectionDescription describes a section in the fabric settings of the cluster.
+type SettingsSectionDescription struct {
+ // Name - The section name of the fabric settings.
+ Name *string `json:"name,omitempty"`
+ // Parameters - The collection of parameters in the section.
+ Parameters *[]SettingsParameterDescription `json:"parameters,omitempty"`
+}
+
+// SingletonPartitionSchemeDescription describes the partition scheme of a singleton-partitioned, or
+// non-partitioned service.
+type SingletonPartitionSchemeDescription struct {
+ // PartitionScheme - Possible values include: 'PartitionSchemePartitionSchemeDescription', 'PartitionSchemeNamed', 'PartitionSchemeSingleton', 'PartitionSchemeUniformInt64Range'
+ PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"partitionScheme,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for SingletonPartitionSchemeDescription.
+func (spsd SingletonPartitionSchemeDescription) MarshalJSON() ([]byte, error) {
+ spsd.PartitionScheme = PartitionSchemeSingleton
+ objectMap := make(map[string]interface{})
+ if spsd.PartitionScheme != "" {
+ objectMap["partitionScheme"] = spsd.PartitionScheme
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsNamedPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for SingletonPartitionSchemeDescription.
+func (spsd SingletonPartitionSchemeDescription) AsNamedPartitionSchemeDescription() (*NamedPartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsSingletonPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for SingletonPartitionSchemeDescription.
+func (spsd SingletonPartitionSchemeDescription) AsSingletonPartitionSchemeDescription() (*SingletonPartitionSchemeDescription, bool) {
+ return &spsd, true
+}
+
+// AsUniformInt64RangePartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for SingletonPartitionSchemeDescription.
+func (spsd SingletonPartitionSchemeDescription) AsUniformInt64RangePartitionSchemeDescription() (*UniformInt64RangePartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for SingletonPartitionSchemeDescription.
+func (spsd SingletonPartitionSchemeDescription) AsPartitionSchemeDescription() (*PartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsBasicPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for SingletonPartitionSchemeDescription.
+func (spsd SingletonPartitionSchemeDescription) AsBasicPartitionSchemeDescription() (BasicPartitionSchemeDescription, bool) {
+ return &spsd, true
+}
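+
+// examplePartitionDescription is a minimal usage sketch, not generated code:
+// it shows how a concrete partition scheme is supplied through the
+// BasicPartitionSchemeDescription interface used by the service property
+// types in this file. When the enclosing properties are marshaled,
+// SingletonPartitionSchemeDescription.MarshalJSON stamps the partitionScheme
+// discriminator. The field values below are placeholders.
+func examplePartitionDescription() ([]byte, error) {
+ serviceTypeName := "ExampleServiceType" // placeholder value
+ instanceCount := int32(5)               // placeholder value
+ props := StatelessServiceProperties{
+ ServiceTypeName:      &serviceTypeName,
+ InstanceCount:        &instanceCount,
+ PartitionDescription: SingletonPartitionSchemeDescription{},
+ }
+ // StatelessServiceProperties.MarshalJSON sets serviceKind and marshals the
+ // nested partition description with its own discriminator.
+ return json.Marshal(props)
+}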
+
+// StatefulServiceProperties the properties of a stateful service resource.
+type StatefulServiceProperties struct {
+ // HasPersistedState - A flag indicating whether this is a persistent service that stores state on the local disk. If it is, the value of this property is true; if not, it is false.
+ HasPersistedState *bool `json:"hasPersistedState,omitempty"`
+ // TargetReplicaSetSize - The target replica set size as a number.
+ TargetReplicaSetSize *int32 `json:"targetReplicaSetSize,omitempty"`
+ // MinReplicaSetSize - The minimum replica set size as a number.
+ MinReplicaSetSize *int32 `json:"minReplicaSetSize,omitempty"`
+ // ReplicaRestartWaitDuration - The duration between when a replica goes down and when a new replica is created, represented in ISO 8601 format (hh:mm:ss.s).
+ ReplicaRestartWaitDuration *date.Time `json:"replicaRestartWaitDuration,omitempty"`
+ // QuorumLossWaitDuration - The maximum duration for which a partition is allowed to be in a state of quorum loss, represented in ISO 8601 format (hh:mm:ss.s).
+ QuorumLossWaitDuration *date.Time `json:"quorumLossWaitDuration,omitempty"`
+ // StandByReplicaKeepDuration - The definition on how long StandBy replicas should be maintained before being removed, represented in ISO 8601 format (hh:mm:ss.s).
+ StandByReplicaKeepDuration *date.Time `json:"standByReplicaKeepDuration,omitempty"`
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // ServiceTypeName - The name of the service type
+ ServiceTypeName *string `json:"serviceTypeName,omitempty"`
+ // PartitionDescription - Describes how the service is partitioned.
+ PartitionDescription BasicPartitionSchemeDescription `json:"partitionDescription,omitempty"`
+ // ServicePackageActivationMode - The activation mode of the service package. Possible values include: 'SharedProcess', 'ExclusiveProcess'
+ ServicePackageActivationMode ArmServicePackageActivationMode `json:"servicePackageActivationMode,omitempty"`
+ // ServiceKind - Possible values include: 'ServiceKindServiceResourceProperties', 'ServiceKindStateful1', 'ServiceKindStateless1'
+ ServiceKind ServiceKindBasicServiceResourceProperties `json:"serviceKind,omitempty"`
+ // PlacementConstraints - The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue, specify the following: "NodeColor == blue".
+ PlacementConstraints *string `json:"placementConstraints,omitempty"`
+ // CorrelationScheme - A list that describes the correlation of the service with other services.
+ CorrelationScheme *[]ServiceCorrelationDescription `json:"correlationScheme,omitempty"`
+ // ServiceLoadMetrics - The service load metrics are given as an array of ServiceLoadMetricDescription objects.
+ ServiceLoadMetrics *[]ServiceLoadMetricDescription `json:"serviceLoadMetrics,omitempty"`
+ // ServicePlacementPolicies - A list that describes the placement policies for the service.
+ ServicePlacementPolicies *[]BasicServicePlacementPolicyDescription `json:"servicePlacementPolicies,omitempty"`
+ // DefaultMoveCost - Specifies the move cost for the service. Possible values include: 'Zero', 'Low', 'Medium', 'High'
+ DefaultMoveCost MoveCost `json:"defaultMoveCost,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for StatefulServiceProperties.
+func (ssp StatefulServiceProperties) MarshalJSON() ([]byte, error) {
+ ssp.ServiceKind = ServiceKindStateful1
+ objectMap := make(map[string]interface{})
+ if ssp.HasPersistedState != nil {
+ objectMap["hasPersistedState"] = ssp.HasPersistedState
+ }
+ if ssp.TargetReplicaSetSize != nil {
+ objectMap["targetReplicaSetSize"] = ssp.TargetReplicaSetSize
+ }
+ if ssp.MinReplicaSetSize != nil {
+ objectMap["minReplicaSetSize"] = ssp.MinReplicaSetSize
+ }
+ if ssp.ReplicaRestartWaitDuration != nil {
+ objectMap["replicaRestartWaitDuration"] = ssp.ReplicaRestartWaitDuration
+ }
+ if ssp.QuorumLossWaitDuration != nil {
+ objectMap["quorumLossWaitDuration"] = ssp.QuorumLossWaitDuration
+ }
+ if ssp.StandByReplicaKeepDuration != nil {
+ objectMap["standByReplicaKeepDuration"] = ssp.StandByReplicaKeepDuration
+ }
+ if ssp.ServiceTypeName != nil {
+ objectMap["serviceTypeName"] = ssp.ServiceTypeName
+ }
+ objectMap["partitionDescription"] = ssp.PartitionDescription
+ if ssp.ServicePackageActivationMode != "" {
+ objectMap["servicePackageActivationMode"] = ssp.ServicePackageActivationMode
+ }
+ if ssp.ServiceKind != "" {
+ objectMap["serviceKind"] = ssp.ServiceKind
+ }
+ if ssp.PlacementConstraints != nil {
+ objectMap["placementConstraints"] = ssp.PlacementConstraints
+ }
+ if ssp.CorrelationScheme != nil {
+ objectMap["correlationScheme"] = ssp.CorrelationScheme
+ }
+ if ssp.ServiceLoadMetrics != nil {
+ objectMap["serviceLoadMetrics"] = ssp.ServiceLoadMetrics
+ }
+ if ssp.ServicePlacementPolicies != nil {
+ objectMap["servicePlacementPolicies"] = ssp.ServicePlacementPolicies
+ }
+ if ssp.DefaultMoveCost != "" {
+ objectMap["defaultMoveCost"] = ssp.DefaultMoveCost
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsStatefulServiceProperties is the BasicServiceResourceProperties implementation for StatefulServiceProperties.
+func (ssp StatefulServiceProperties) AsStatefulServiceProperties() (*StatefulServiceProperties, bool) {
+ return &ssp, true
+}
+
+// AsStatelessServiceProperties is the BasicServiceResourceProperties implementation for StatefulServiceProperties.
+func (ssp StatefulServiceProperties) AsStatelessServiceProperties() (*StatelessServiceProperties, bool) {
+ return nil, false
+}
+
+// AsServiceResourceProperties is the BasicServiceResourceProperties implementation for StatefulServiceProperties.
+func (ssp StatefulServiceProperties) AsServiceResourceProperties() (*ServiceResourceProperties, bool) {
+ return nil, false
+}
+
+// AsBasicServiceResourceProperties is the BasicServiceResourceProperties implementation for StatefulServiceProperties.
+func (ssp StatefulServiceProperties) AsBasicServiceResourceProperties() (BasicServiceResourceProperties, bool) {
+ return &ssp, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for StatefulServiceProperties struct.
+func (ssp *StatefulServiceProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "hasPersistedState":
+ if v != nil {
+ var hasPersistedState bool
+ err = json.Unmarshal(*v, &hasPersistedState)
+ if err != nil {
+ return err
+ }
+ ssp.HasPersistedState = &hasPersistedState
+ }
+ case "targetReplicaSetSize":
+ if v != nil {
+ var targetReplicaSetSize int32
+ err = json.Unmarshal(*v, &targetReplicaSetSize)
+ if err != nil {
+ return err
+ }
+ ssp.TargetReplicaSetSize = &targetReplicaSetSize
+ }
+ case "minReplicaSetSize":
+ if v != nil {
+ var minReplicaSetSize int32
+ err = json.Unmarshal(*v, &minReplicaSetSize)
+ if err != nil {
+ return err
+ }
+ ssp.MinReplicaSetSize = &minReplicaSetSize
+ }
+ case "replicaRestartWaitDuration":
+ if v != nil {
+ var replicaRestartWaitDuration date.Time
+ err = json.Unmarshal(*v, &replicaRestartWaitDuration)
+ if err != nil {
+ return err
+ }
+ ssp.ReplicaRestartWaitDuration = &replicaRestartWaitDuration
+ }
+ case "quorumLossWaitDuration":
+ if v != nil {
+ var quorumLossWaitDuration date.Time
+ err = json.Unmarshal(*v, &quorumLossWaitDuration)
+ if err != nil {
+ return err
+ }
+ ssp.QuorumLossWaitDuration = &quorumLossWaitDuration
+ }
+ case "standByReplicaKeepDuration":
+ if v != nil {
+ var standByReplicaKeepDuration date.Time
+ err = json.Unmarshal(*v, &standByReplicaKeepDuration)
+ if err != nil {
+ return err
+ }
+ ssp.StandByReplicaKeepDuration = &standByReplicaKeepDuration
+ }
+ case "provisioningState":
+ if v != nil {
+ var provisioningState string
+ err = json.Unmarshal(*v, &provisioningState)
+ if err != nil {
+ return err
+ }
+ ssp.ProvisioningState = &provisioningState
+ }
+ case "serviceTypeName":
+ if v != nil {
+ var serviceTypeName string
+ err = json.Unmarshal(*v, &serviceTypeName)
+ if err != nil {
+ return err
+ }
+ ssp.ServiceTypeName = &serviceTypeName
+ }
+ case "partitionDescription":
+ if v != nil {
+ partitionDescription, err := unmarshalBasicPartitionSchemeDescription(*v)
+ if err != nil {
+ return err
+ }
+ ssp.PartitionDescription = partitionDescription
+ }
+ case "servicePackageActivationMode":
+ if v != nil {
+ var servicePackageActivationMode ArmServicePackageActivationMode
+ err = json.Unmarshal(*v, &servicePackageActivationMode)
+ if err != nil {
+ return err
+ }
+ ssp.ServicePackageActivationMode = servicePackageActivationMode
+ }
+ case "serviceKind":
+ if v != nil {
+ var serviceKind ServiceKindBasicServiceResourceProperties
+ err = json.Unmarshal(*v, &serviceKind)
+ if err != nil {
+ return err
+ }
+ ssp.ServiceKind = serviceKind
+ }
+ case "placementConstraints":
+ if v != nil {
+ var placementConstraints string
+ err = json.Unmarshal(*v, &placementConstraints)
+ if err != nil {
+ return err
+ }
+ ssp.PlacementConstraints = &placementConstraints
+ }
+ case "correlationScheme":
+ if v != nil {
+ var correlationScheme []ServiceCorrelationDescription
+ err = json.Unmarshal(*v, &correlationScheme)
+ if err != nil {
+ return err
+ }
+ ssp.CorrelationScheme = &correlationScheme
+ }
+ case "serviceLoadMetrics":
+ if v != nil {
+ var serviceLoadMetrics []ServiceLoadMetricDescription
+ err = json.Unmarshal(*v, &serviceLoadMetrics)
+ if err != nil {
+ return err
+ }
+ ssp.ServiceLoadMetrics = &serviceLoadMetrics
+ }
+ case "servicePlacementPolicies":
+ if v != nil {
+ servicePlacementPolicies, err := unmarshalBasicServicePlacementPolicyDescriptionArray(*v)
+ if err != nil {
+ return err
+ }
+ ssp.ServicePlacementPolicies = &servicePlacementPolicies
+ }
+ case "defaultMoveCost":
+ if v != nil {
+ var defaultMoveCost MoveCost
+ err = json.Unmarshal(*v, &defaultMoveCost)
+ if err != nil {
+ return err
+ }
+ ssp.DefaultMoveCost = defaultMoveCost
+ }
+ }
+ }
+
+ return nil
+}
+
+// StatefulServiceUpdateProperties the properties of a stateful service resource for patch operations.
+type StatefulServiceUpdateProperties struct {
+ // TargetReplicaSetSize - The target replica set size as a number.
+ TargetReplicaSetSize *int32 `json:"targetReplicaSetSize,omitempty"`
+ // MinReplicaSetSize - The minimum replica set size as a number.
+ MinReplicaSetSize *int32 `json:"minReplicaSetSize,omitempty"`
+ // ReplicaRestartWaitDuration - The duration between when a replica goes down and when a new replica is created, represented in ISO 8601 format (hh:mm:ss.s).
+ ReplicaRestartWaitDuration *date.Time `json:"replicaRestartWaitDuration,omitempty"`
+ // QuorumLossWaitDuration - The maximum duration for which a partition is allowed to be in a state of quorum loss, represented in ISO 8601 format (hh:mm:ss.s).
+ QuorumLossWaitDuration *date.Time `json:"quorumLossWaitDuration,omitempty"`
+ // StandByReplicaKeepDuration - The definition on how long StandBy replicas should be maintained before being removed, represented in ISO 8601 format (hh:mm:ss.s).
+ StandByReplicaKeepDuration *date.Time `json:"standByReplicaKeepDuration,omitempty"`
+ // ServiceKind - Possible values include: 'ServiceKindBasicServiceResourceUpdatePropertiesServiceKindServiceResourceUpdateProperties', 'ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateful', 'ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateless'
+ ServiceKind ServiceKindBasicServiceResourceUpdateProperties `json:"serviceKind,omitempty"`
+ // PlacementConstraints - The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue, specify the following: "NodeColor == blue".
+ PlacementConstraints *string `json:"placementConstraints,omitempty"`
+ // CorrelationScheme - A list that describes the correlation of the service with other services.
+ CorrelationScheme *[]ServiceCorrelationDescription `json:"correlationScheme,omitempty"`
+ // ServiceLoadMetrics - The service load metrics are given as an array of ServiceLoadMetricDescription objects.
+ ServiceLoadMetrics *[]ServiceLoadMetricDescription `json:"serviceLoadMetrics,omitempty"`
+ // ServicePlacementPolicies - A list that describes the placement policies for the service.
+ ServicePlacementPolicies *[]BasicServicePlacementPolicyDescription `json:"servicePlacementPolicies,omitempty"`
+ // DefaultMoveCost - Specifies the move cost for the service. Possible values include: 'Zero', 'Low', 'Medium', 'High'
+ DefaultMoveCost MoveCost `json:"defaultMoveCost,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for StatefulServiceUpdateProperties.
+func (ssup StatefulServiceUpdateProperties) MarshalJSON() ([]byte, error) {
+ ssup.ServiceKind = ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateful
+ objectMap := make(map[string]interface{})
+ if ssup.TargetReplicaSetSize != nil {
+ objectMap["targetReplicaSetSize"] = ssup.TargetReplicaSetSize
+ }
+ if ssup.MinReplicaSetSize != nil {
+ objectMap["minReplicaSetSize"] = ssup.MinReplicaSetSize
+ }
+ if ssup.ReplicaRestartWaitDuration != nil {
+ objectMap["replicaRestartWaitDuration"] = ssup.ReplicaRestartWaitDuration
+ }
+ if ssup.QuorumLossWaitDuration != nil {
+ objectMap["quorumLossWaitDuration"] = ssup.QuorumLossWaitDuration
+ }
+ if ssup.StandByReplicaKeepDuration != nil {
+ objectMap["standByReplicaKeepDuration"] = ssup.StandByReplicaKeepDuration
+ }
+ if ssup.ServiceKind != "" {
+ objectMap["serviceKind"] = ssup.ServiceKind
+ }
+ if ssup.PlacementConstraints != nil {
+ objectMap["placementConstraints"] = ssup.PlacementConstraints
+ }
+ if ssup.CorrelationScheme != nil {
+ objectMap["correlationScheme"] = ssup.CorrelationScheme
+ }
+ if ssup.ServiceLoadMetrics != nil {
+ objectMap["serviceLoadMetrics"] = ssup.ServiceLoadMetrics
+ }
+ if ssup.ServicePlacementPolicies != nil {
+ objectMap["servicePlacementPolicies"] = ssup.ServicePlacementPolicies
+ }
+ if ssup.DefaultMoveCost != "" {
+ objectMap["defaultMoveCost"] = ssup.DefaultMoveCost
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsStatefulServiceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for StatefulServiceUpdateProperties.
+func (ssup StatefulServiceUpdateProperties) AsStatefulServiceUpdateProperties() (*StatefulServiceUpdateProperties, bool) {
+ return &ssup, true
+}
+
+// AsStatelessServiceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for StatefulServiceUpdateProperties.
+func (ssup StatefulServiceUpdateProperties) AsStatelessServiceUpdateProperties() (*StatelessServiceUpdateProperties, bool) {
+ return nil, false
+}
+
+// AsServiceResourceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for StatefulServiceUpdateProperties.
+func (ssup StatefulServiceUpdateProperties) AsServiceResourceUpdateProperties() (*ServiceResourceUpdateProperties, bool) {
+ return nil, false
+}
+
+// AsBasicServiceResourceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for StatefulServiceUpdateProperties.
+func (ssup StatefulServiceUpdateProperties) AsBasicServiceResourceUpdateProperties() (BasicServiceResourceUpdateProperties, bool) {
+ return &ssup, true
+}
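+
+// exampleStatefulPatchPayload is a minimal usage sketch, not generated code:
+// it builds the body of a service patch operation by wrapping
+// StatefulServiceUpdateProperties in a ServiceResourceUpdate. MarshalJSON on
+// the concrete update properties stamps the update-specific serviceKind
+// discriminator, mirroring what ServiceResourceUpdate.UnmarshalJSON expects
+// back. The replica counts below are placeholders.
+func exampleStatefulPatchPayload() ([]byte, error) {
+ minReplicas := int32(2)    // placeholder value
+ targetReplicas := int32(5) // placeholder value
+ update := ServiceResourceUpdate{
+ BasicServiceResourceUpdateProperties: StatefulServiceUpdateProperties{
+ MinReplicaSetSize:    &minReplicas,
+ TargetReplicaSetSize: &targetReplicas,
+ },
+ }
+ return json.Marshal(update)
+}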
+
+// UnmarshalJSON is the custom unmarshaler for StatefulServiceUpdateProperties struct.
+func (ssup *StatefulServiceUpdateProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "targetReplicaSetSize":
+ if v != nil {
+ var targetReplicaSetSize int32
+ err = json.Unmarshal(*v, &targetReplicaSetSize)
+ if err != nil {
+ return err
+ }
+ ssup.TargetReplicaSetSize = &targetReplicaSetSize
+ }
+ case "minReplicaSetSize":
+ if v != nil {
+ var minReplicaSetSize int32
+ err = json.Unmarshal(*v, &minReplicaSetSize)
+ if err != nil {
+ return err
+ }
+ ssup.MinReplicaSetSize = &minReplicaSetSize
+ }
+ case "replicaRestartWaitDuration":
+ if v != nil {
+ var replicaRestartWaitDuration date.Time
+ err = json.Unmarshal(*v, &replicaRestartWaitDuration)
+ if err != nil {
+ return err
+ }
+ ssup.ReplicaRestartWaitDuration = &replicaRestartWaitDuration
+ }
+ case "quorumLossWaitDuration":
+ if v != nil {
+ var quorumLossWaitDuration date.Time
+ err = json.Unmarshal(*v, &quorumLossWaitDuration)
+ if err != nil {
+ return err
+ }
+ ssup.QuorumLossWaitDuration = &quorumLossWaitDuration
+ }
+ case "standByReplicaKeepDuration":
+ if v != nil {
+ var standByReplicaKeepDuration date.Time
+ err = json.Unmarshal(*v, &standByReplicaKeepDuration)
+ if err != nil {
+ return err
+ }
+ ssup.StandByReplicaKeepDuration = &standByReplicaKeepDuration
+ }
+ case "serviceKind":
+ if v != nil {
+ var serviceKind ServiceKindBasicServiceResourceUpdateProperties
+ err = json.Unmarshal(*v, &serviceKind)
+ if err != nil {
+ return err
+ }
+ ssup.ServiceKind = serviceKind
+ }
+ case "placementConstraints":
+ if v != nil {
+ var placementConstraints string
+ err = json.Unmarshal(*v, &placementConstraints)
+ if err != nil {
+ return err
+ }
+ ssup.PlacementConstraints = &placementConstraints
+ }
+ case "correlationScheme":
+ if v != nil {
+ var correlationScheme []ServiceCorrelationDescription
+ err = json.Unmarshal(*v, &correlationScheme)
+ if err != nil {
+ return err
+ }
+ ssup.CorrelationScheme = &correlationScheme
+ }
+ case "serviceLoadMetrics":
+ if v != nil {
+ var serviceLoadMetrics []ServiceLoadMetricDescription
+ err = json.Unmarshal(*v, &serviceLoadMetrics)
+ if err != nil {
+ return err
+ }
+ ssup.ServiceLoadMetrics = &serviceLoadMetrics
+ }
+ case "servicePlacementPolicies":
+ if v != nil {
+ servicePlacementPolicies, err := unmarshalBasicServicePlacementPolicyDescriptionArray(*v)
+ if err != nil {
+ return err
+ }
+ ssup.ServicePlacementPolicies = &servicePlacementPolicies
+ }
+ case "defaultMoveCost":
+ if v != nil {
+ var defaultMoveCost MoveCost
+ err = json.Unmarshal(*v, &defaultMoveCost)
+ if err != nil {
+ return err
+ }
+ ssup.DefaultMoveCost = defaultMoveCost
+ }
+ }
+ }
+
+ return nil
+}
+
+// StatelessServiceProperties the properties of a stateless service resource.
+type StatelessServiceProperties struct {
+ // InstanceCount - The instance count.
+ InstanceCount *int32 `json:"instanceCount,omitempty"`
+ // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ // ServiceTypeName - The name of the service type
+ ServiceTypeName *string `json:"serviceTypeName,omitempty"`
+ // PartitionDescription - Describes how the service is partitioned.
+ PartitionDescription BasicPartitionSchemeDescription `json:"partitionDescription,omitempty"`
+ // ServicePackageActivationMode - The activation mode of the service package. Possible values include: 'SharedProcess', 'ExclusiveProcess'
+ ServicePackageActivationMode ArmServicePackageActivationMode `json:"servicePackageActivationMode,omitempty"`
+ // ServiceKind - Possible values include: 'ServiceKindServiceResourceProperties', 'ServiceKindStateful1', 'ServiceKindStateless1'
+ ServiceKind ServiceKindBasicServiceResourceProperties `json:"serviceKind,omitempty"`
+ // PlacementConstraints - The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeType is blue, specify the following: "NodeColor == blue".
+ PlacementConstraints *string `json:"placementConstraints,omitempty"`
+ // CorrelationScheme - A list that describes the correlation of the service with other services.
+ CorrelationScheme *[]ServiceCorrelationDescription `json:"correlationScheme,omitempty"`
+ // ServiceLoadMetrics - The service load metrics are given as an array of ServiceLoadMetricDescription objects.
+ ServiceLoadMetrics *[]ServiceLoadMetricDescription `json:"serviceLoadMetrics,omitempty"`
+ // ServicePlacementPolicies - A list that describes the placement policies for the service.
+ ServicePlacementPolicies *[]BasicServicePlacementPolicyDescription `json:"servicePlacementPolicies,omitempty"`
+ // DefaultMoveCost - Specifies the move cost for the service. Possible values include: 'Zero', 'Low', 'Medium', 'High'
+ DefaultMoveCost MoveCost `json:"defaultMoveCost,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for StatelessServiceProperties.
+func (ssp StatelessServiceProperties) MarshalJSON() ([]byte, error) {
+ ssp.ServiceKind = ServiceKindStateless1
+ objectMap := make(map[string]interface{})
+ if ssp.InstanceCount != nil {
+ objectMap["instanceCount"] = ssp.InstanceCount
+ }
+ if ssp.ServiceTypeName != nil {
+ objectMap["serviceTypeName"] = ssp.ServiceTypeName
+ }
+ objectMap["partitionDescription"] = ssp.PartitionDescription
+ if ssp.ServicePackageActivationMode != "" {
+ objectMap["servicePackageActivationMode"] = ssp.ServicePackageActivationMode
+ }
+ if ssp.ServiceKind != "" {
+ objectMap["serviceKind"] = ssp.ServiceKind
+ }
+ if ssp.PlacementConstraints != nil {
+ objectMap["placementConstraints"] = ssp.PlacementConstraints
+ }
+ if ssp.CorrelationScheme != nil {
+ objectMap["correlationScheme"] = ssp.CorrelationScheme
+ }
+ if ssp.ServiceLoadMetrics != nil {
+ objectMap["serviceLoadMetrics"] = ssp.ServiceLoadMetrics
+ }
+ if ssp.ServicePlacementPolicies != nil {
+ objectMap["servicePlacementPolicies"] = ssp.ServicePlacementPolicies
+ }
+ if ssp.DefaultMoveCost != "" {
+ objectMap["defaultMoveCost"] = ssp.DefaultMoveCost
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsStatefulServiceProperties is the BasicServiceResourceProperties implementation for StatelessServiceProperties.
+func (ssp StatelessServiceProperties) AsStatefulServiceProperties() (*StatefulServiceProperties, bool) {
+ return nil, false
+}
+
+// AsStatelessServiceProperties is the BasicServiceResourceProperties implementation for StatelessServiceProperties.
+func (ssp StatelessServiceProperties) AsStatelessServiceProperties() (*StatelessServiceProperties, bool) {
+ return &ssp, true
+}
+
+// AsServiceResourceProperties is the BasicServiceResourceProperties implementation for StatelessServiceProperties.
+func (ssp StatelessServiceProperties) AsServiceResourceProperties() (*ServiceResourceProperties, bool) {
+ return nil, false
+}
+
+// AsBasicServiceResourceProperties is the BasicServiceResourceProperties implementation for StatelessServiceProperties.
+func (ssp StatelessServiceProperties) AsBasicServiceResourceProperties() (BasicServiceResourceProperties, bool) {
+ return &ssp, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for StatelessServiceProperties struct.
+func (ssp *StatelessServiceProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "instanceCount":
+ if v != nil {
+ var instanceCount int32
+ err = json.Unmarshal(*v, &instanceCount)
+ if err != nil {
+ return err
+ }
+ ssp.InstanceCount = &instanceCount
+ }
+ case "provisioningState":
+ if v != nil {
+ var provisioningState string
+ err = json.Unmarshal(*v, &provisioningState)
+ if err != nil {
+ return err
+ }
+ ssp.ProvisioningState = &provisioningState
+ }
+ case "serviceTypeName":
+ if v != nil {
+ var serviceTypeName string
+ err = json.Unmarshal(*v, &serviceTypeName)
+ if err != nil {
+ return err
+ }
+ ssp.ServiceTypeName = &serviceTypeName
+ }
+ case "partitionDescription":
+ if v != nil {
+ partitionDescription, err := unmarshalBasicPartitionSchemeDescription(*v)
+ if err != nil {
+ return err
+ }
+ ssp.PartitionDescription = partitionDescription
+ }
+ case "servicePackageActivationMode":
+ if v != nil {
+ var servicePackageActivationMode ArmServicePackageActivationMode
+ err = json.Unmarshal(*v, &servicePackageActivationMode)
+ if err != nil {
+ return err
+ }
+ ssp.ServicePackageActivationMode = servicePackageActivationMode
+ }
+ case "serviceKind":
+ if v != nil {
+ var serviceKind ServiceKindBasicServiceResourceProperties
+ err = json.Unmarshal(*v, &serviceKind)
+ if err != nil {
+ return err
+ }
+ ssp.ServiceKind = serviceKind
+ }
+ case "placementConstraints":
+ if v != nil {
+ var placementConstraints string
+ err = json.Unmarshal(*v, &placementConstraints)
+ if err != nil {
+ return err
+ }
+ ssp.PlacementConstraints = &placementConstraints
+ }
+ case "correlationScheme":
+ if v != nil {
+ var correlationScheme []ServiceCorrelationDescription
+ err = json.Unmarshal(*v, &correlationScheme)
+ if err != nil {
+ return err
+ }
+ ssp.CorrelationScheme = &correlationScheme
+ }
+ case "serviceLoadMetrics":
+ if v != nil {
+ var serviceLoadMetrics []ServiceLoadMetricDescription
+ err = json.Unmarshal(*v, &serviceLoadMetrics)
+ if err != nil {
+ return err
+ }
+ ssp.ServiceLoadMetrics = &serviceLoadMetrics
+ }
+ case "servicePlacementPolicies":
+ if v != nil {
+ servicePlacementPolicies, err := unmarshalBasicServicePlacementPolicyDescriptionArray(*v)
+ if err != nil {
+ return err
+ }
+ ssp.ServicePlacementPolicies = &servicePlacementPolicies
+ }
+ case "defaultMoveCost":
+ if v != nil {
+ var defaultMoveCost MoveCost
+ err = json.Unmarshal(*v, &defaultMoveCost)
+ if err != nil {
+ return err
+ }
+ ssp.DefaultMoveCost = defaultMoveCost
+ }
+ }
+ }
+
+ return nil
+}
+
+// StatelessServiceUpdateProperties the properties of a stateless service resource for patch operations.
+type StatelessServiceUpdateProperties struct {
+ // InstanceCount - The instance count.
+ InstanceCount *int32 `json:"instanceCount,omitempty"`
+ // ServiceKind - Possible values include: 'ServiceKindBasicServiceResourceUpdatePropertiesServiceKindServiceResourceUpdateProperties', 'ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateful', 'ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateless'
+ ServiceKind ServiceKindBasicServiceResourceUpdateProperties `json:"serviceKind,omitempty"`
+	// PlacementConstraints - The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow for restricting a service to particular nodes based on the service requirements. For example, to place a service on nodes where NodeColor is blue specify the following: "NodeColor == blue".
+ PlacementConstraints *string `json:"placementConstraints,omitempty"`
+ // CorrelationScheme - A list that describes the correlation of the service with other services.
+ CorrelationScheme *[]ServiceCorrelationDescription `json:"correlationScheme,omitempty"`
+ // ServiceLoadMetrics - The service load metrics is given as an array of ServiceLoadMetricDescription objects.
+ ServiceLoadMetrics *[]ServiceLoadMetricDescription `json:"serviceLoadMetrics,omitempty"`
+	// ServicePlacementPolicies - A list that describes the placement policies of the service.
+ ServicePlacementPolicies *[]BasicServicePlacementPolicyDescription `json:"servicePlacementPolicies,omitempty"`
+ // DefaultMoveCost - Specifies the move cost for the service. Possible values include: 'Zero', 'Low', 'Medium', 'High'
+ DefaultMoveCost MoveCost `json:"defaultMoveCost,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for StatelessServiceUpdateProperties.
+func (ssup StatelessServiceUpdateProperties) MarshalJSON() ([]byte, error) {
+ ssup.ServiceKind = ServiceKindBasicServiceResourceUpdatePropertiesServiceKindStateless
+ objectMap := make(map[string]interface{})
+ if ssup.InstanceCount != nil {
+ objectMap["instanceCount"] = ssup.InstanceCount
+ }
+ if ssup.ServiceKind != "" {
+ objectMap["serviceKind"] = ssup.ServiceKind
+ }
+ if ssup.PlacementConstraints != nil {
+ objectMap["placementConstraints"] = ssup.PlacementConstraints
+ }
+ if ssup.CorrelationScheme != nil {
+ objectMap["correlationScheme"] = ssup.CorrelationScheme
+ }
+ if ssup.ServiceLoadMetrics != nil {
+ objectMap["serviceLoadMetrics"] = ssup.ServiceLoadMetrics
+ }
+ if ssup.ServicePlacementPolicies != nil {
+ objectMap["servicePlacementPolicies"] = ssup.ServicePlacementPolicies
+ }
+ if ssup.DefaultMoveCost != "" {
+ objectMap["defaultMoveCost"] = ssup.DefaultMoveCost
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsStatefulServiceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for StatelessServiceUpdateProperties.
+func (ssup StatelessServiceUpdateProperties) AsStatefulServiceUpdateProperties() (*StatefulServiceUpdateProperties, bool) {
+ return nil, false
+}
+
+// AsStatelessServiceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for StatelessServiceUpdateProperties.
+func (ssup StatelessServiceUpdateProperties) AsStatelessServiceUpdateProperties() (*StatelessServiceUpdateProperties, bool) {
+ return &ssup, true
+}
+
+// AsServiceResourceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for StatelessServiceUpdateProperties.
+func (ssup StatelessServiceUpdateProperties) AsServiceResourceUpdateProperties() (*ServiceResourceUpdateProperties, bool) {
+ return nil, false
+}
+
+// AsBasicServiceResourceUpdateProperties is the BasicServiceResourceUpdateProperties implementation for StatelessServiceUpdateProperties.
+func (ssup StatelessServiceUpdateProperties) AsBasicServiceResourceUpdateProperties() (BasicServiceResourceUpdateProperties, bool) {
+ return &ssup, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for StatelessServiceUpdateProperties struct.
+func (ssup *StatelessServiceUpdateProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "instanceCount":
+ if v != nil {
+ var instanceCount int32
+ err = json.Unmarshal(*v, &instanceCount)
+ if err != nil {
+ return err
+ }
+ ssup.InstanceCount = &instanceCount
+ }
+ case "serviceKind":
+ if v != nil {
+ var serviceKind ServiceKindBasicServiceResourceUpdateProperties
+ err = json.Unmarshal(*v, &serviceKind)
+ if err != nil {
+ return err
+ }
+ ssup.ServiceKind = serviceKind
+ }
+ case "placementConstraints":
+ if v != nil {
+ var placementConstraints string
+ err = json.Unmarshal(*v, &placementConstraints)
+ if err != nil {
+ return err
+ }
+ ssup.PlacementConstraints = &placementConstraints
+ }
+ case "correlationScheme":
+ if v != nil {
+ var correlationScheme []ServiceCorrelationDescription
+ err = json.Unmarshal(*v, &correlationScheme)
+ if err != nil {
+ return err
+ }
+ ssup.CorrelationScheme = &correlationScheme
+ }
+ case "serviceLoadMetrics":
+ if v != nil {
+ var serviceLoadMetrics []ServiceLoadMetricDescription
+ err = json.Unmarshal(*v, &serviceLoadMetrics)
+ if err != nil {
+ return err
+ }
+ ssup.ServiceLoadMetrics = &serviceLoadMetrics
+ }
+ case "servicePlacementPolicies":
+ if v != nil {
+ servicePlacementPolicies, err := unmarshalBasicServicePlacementPolicyDescriptionArray(*v)
+ if err != nil {
+ return err
+ }
+ ssup.ServicePlacementPolicies = &servicePlacementPolicies
+ }
+ case "defaultMoveCost":
+ if v != nil {
+ var defaultMoveCost MoveCost
+ err = json.Unmarshal(*v, &defaultMoveCost)
+ if err != nil {
+ return err
+ }
+ ssup.DefaultMoveCost = defaultMoveCost
+ }
+ }
+ }
+
+ return nil
+}
+
+// UniformInt64RangePartitionSchemeDescription describes a partitioning scheme where an integer range is
+// allocated evenly across a number of partitions.
+type UniformInt64RangePartitionSchemeDescription struct {
+ // Count - The number of partitions.
+ Count *int32 `json:"Count,omitempty"`
+	// LowKey - String indicating the lower bound of the partition key range that
+	// should be split across the ‘Count’ partitions
+	LowKey *string `json:"LowKey,omitempty"`
+	// HighKey - String indicating the upper bound of the partition key range that
+	// should be split across the ‘Count’ partitions
+	HighKey *string `json:"HighKey,omitempty"`
+ // PartitionScheme - Possible values include: 'PartitionSchemePartitionSchemeDescription', 'PartitionSchemeNamed', 'PartitionSchemeSingleton', 'PartitionSchemeUniformInt64Range'
+ PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"partitionScheme,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for UniformInt64RangePartitionSchemeDescription.
+func (ui6rpsd UniformInt64RangePartitionSchemeDescription) MarshalJSON() ([]byte, error) {
+ ui6rpsd.PartitionScheme = PartitionSchemeUniformInt64Range
+ objectMap := make(map[string]interface{})
+ if ui6rpsd.Count != nil {
+ objectMap["Count"] = ui6rpsd.Count
+ }
+ if ui6rpsd.LowKey != nil {
+ objectMap["LowKey"] = ui6rpsd.LowKey
+ }
+ if ui6rpsd.HighKey != nil {
+ objectMap["HighKey"] = ui6rpsd.HighKey
+ }
+ if ui6rpsd.PartitionScheme != "" {
+ objectMap["partitionScheme"] = ui6rpsd.PartitionScheme
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsNamedPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for UniformInt64RangePartitionSchemeDescription.
+func (ui6rpsd UniformInt64RangePartitionSchemeDescription) AsNamedPartitionSchemeDescription() (*NamedPartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsSingletonPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for UniformInt64RangePartitionSchemeDescription.
+func (ui6rpsd UniformInt64RangePartitionSchemeDescription) AsSingletonPartitionSchemeDescription() (*SingletonPartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsUniformInt64RangePartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for UniformInt64RangePartitionSchemeDescription.
+func (ui6rpsd UniformInt64RangePartitionSchemeDescription) AsUniformInt64RangePartitionSchemeDescription() (*UniformInt64RangePartitionSchemeDescription, bool) {
+ return &ui6rpsd, true
+}
+
+// AsPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for UniformInt64RangePartitionSchemeDescription.
+func (ui6rpsd UniformInt64RangePartitionSchemeDescription) AsPartitionSchemeDescription() (*PartitionSchemeDescription, bool) {
+ return nil, false
+}
+
+// AsBasicPartitionSchemeDescription is the BasicPartitionSchemeDescription implementation for UniformInt64RangePartitionSchemeDescription.
+func (ui6rpsd UniformInt64RangePartitionSchemeDescription) AsBasicPartitionSchemeDescription() (BasicPartitionSchemeDescription, bool) {
+ return &ui6rpsd, true
+}
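
A minimal sketch (editorial addition, not part of the generated patch) of how the polymorphic partition-scheme types above compose with StatelessServiceProperties; the service type name and key range are placeholder values, and the pointer helpers come from github.com/Azure/go-autorest/autorest/to.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Build stateless service properties with a uniform Int64 range partition scheme.
	// "MyStatelessServiceType" and the key range are placeholders for illustration.
	props := servicefabric.StatelessServiceProperties{
		ServiceTypeName: to.StringPtr("MyStatelessServiceType"),
		InstanceCount:   to.Int32Ptr(3),
		PartitionDescription: servicefabric.UniformInt64RangePartitionSchemeDescription{
			Count:   to.Int32Ptr(4),
			LowKey:  to.StringPtr("0"),
			HighKey: to.StringPtr("99"),
		},
	}

	// PartitionDescription is the BasicPartitionSchemeDescription interface; the
	// generated As* helpers recover the concrete scheme without a manual type switch.
	if scheme, ok := props.PartitionDescription.AsUniformInt64RangePartitionSchemeDescription(); ok {
		fmt.Printf("uniform range scheme with %d partitions\n", *scheme.Count)
	}
}
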
diff --git a/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/operations.go b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/operations.go
new file mode 100644
index 000000000000..212b6ff8ee34
--- /dev/null
+++ b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/operations.go
@@ -0,0 +1,147 @@
+package servicefabric
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// OperationsClient is the Service Fabric Management Client
+type OperationsClient struct {
+ BaseClient
+}
+
+// NewOperationsClient creates an instance of the OperationsClient client.
+func NewOperationsClient(subscriptionID string) OperationsClient {
+ return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+ return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List gets the list of available Service Fabric resource provider API operations.
+func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.olr.Response.Response != nil {
+ sc = result.olr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.OperationsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.olr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.OperationsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.olr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.OperationsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.ServiceFabric/operations"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationListResult) (result OperationListResult, err error) {
+ req, err := lastResults.operationListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "servicefabric.OperationsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "servicefabric.OperationsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.OperationsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
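
A hedged usage sketch for the OperationsClient added above (not generated code): the subscription ID is a placeholder, and environment-based authentication via autorest/azure/auth is just one way to obtain an Authorizer.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	client := servicefabric.NewOperationsClient("<subscription-id>") // placeholder
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	// ListComplete wraps List and listNextResults so the iterator crosses page
	// boundaries transparently.
	it, err := client.ListComplete(context.Background())
	if err != nil {
		panic(err)
	}
	for it.NotDone() {
		fmt.Printf("%+v\n", it.Value())
		if err := it.NextWithContext(context.Background()); err != nil {
			panic(err)
		}
	}
}
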
diff --git a/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/servicefabricapi/interfaces.go b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/servicefabricapi/interfaces.go
new file mode 100644
index 000000000000..623e813daddf
--- /dev/null
+++ b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/servicefabricapi/interfaces.go
@@ -0,0 +1,95 @@
+package servicefabricapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// ClustersClientAPI contains the set of methods on the ClustersClient type.
+type ClustersClientAPI interface {
+ Create(ctx context.Context, resourceGroupName string, clusterName string, parameters servicefabric.Cluster) (result servicefabric.ClustersCreateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, clusterName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, clusterName string) (result servicefabric.Cluster, err error)
+ List(ctx context.Context) (result servicefabric.ClusterListResult, err error)
+ ListByResourceGroup(ctx context.Context, resourceGroupName string) (result servicefabric.ClusterListResult, err error)
+ Update(ctx context.Context, resourceGroupName string, clusterName string, parameters servicefabric.ClusterUpdateParameters) (result servicefabric.ClustersUpdateFuture, err error)
+}
+
+var _ ClustersClientAPI = (*servicefabric.ClustersClient)(nil)
+
+// ClusterVersionsClientAPI contains the set of methods on the ClusterVersionsClient type.
+type ClusterVersionsClientAPI interface {
+ Get(ctx context.Context, location string, clusterVersion string) (result servicefabric.ClusterCodeVersionsListResult, err error)
+ GetByEnvironment(ctx context.Context, location string, environment string, clusterVersion string) (result servicefabric.ClusterCodeVersionsListResult, err error)
+ List(ctx context.Context, location string) (result servicefabric.ClusterCodeVersionsListResult, err error)
+ ListByEnvironment(ctx context.Context, location string, environment string) (result servicefabric.ClusterCodeVersionsListResult, err error)
+}
+
+var _ ClusterVersionsClientAPI = (*servicefabric.ClusterVersionsClient)(nil)
+
+// OperationsClientAPI contains the set of methods on the OperationsClient type.
+type OperationsClientAPI interface {
+ List(ctx context.Context) (result servicefabric.OperationListResultPage, err error)
+}
+
+var _ OperationsClientAPI = (*servicefabric.OperationsClient)(nil)
+
+// ApplicationTypesClientAPI contains the set of methods on the ApplicationTypesClient type.
+type ApplicationTypesClientAPI interface {
+ Create(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, parameters servicefabric.ApplicationTypeResource) (result servicefabric.ApplicationTypeResource, err error)
+ Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (result servicefabric.ApplicationTypesDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (result servicefabric.ApplicationTypeResource, err error)
+ List(ctx context.Context, resourceGroupName string, clusterName string) (result servicefabric.ApplicationTypeResourceList, err error)
+}
+
+var _ ApplicationTypesClientAPI = (*servicefabric.ApplicationTypesClient)(nil)
+
+// ApplicationTypeVersionsClientAPI contains the set of methods on the ApplicationTypeVersionsClient type.
+type ApplicationTypeVersionsClientAPI interface {
+ Create(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, parameters servicefabric.ApplicationTypeVersionResource) (result servicefabric.ApplicationTypeVersionsCreateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string) (result servicefabric.ApplicationTypeVersionsDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string) (result servicefabric.ApplicationTypeVersionResource, err error)
+ List(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (result servicefabric.ApplicationTypeVersionResourceList, err error)
+}
+
+var _ ApplicationTypeVersionsClientAPI = (*servicefabric.ApplicationTypeVersionsClient)(nil)
+
+// ApplicationsClientAPI contains the set of methods on the ApplicationsClient type.
+type ApplicationsClientAPI interface {
+ Create(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters servicefabric.ApplicationResource) (result servicefabric.ApplicationsCreateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (result servicefabric.ApplicationsDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (result servicefabric.ApplicationResource, err error)
+ List(ctx context.Context, resourceGroupName string, clusterName string) (result servicefabric.ApplicationResourceList, err error)
+ Update(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters servicefabric.ApplicationResourceUpdate) (result servicefabric.ApplicationsUpdateFuture, err error)
+}
+
+var _ ApplicationsClientAPI = (*servicefabric.ApplicationsClient)(nil)
+
+// ServicesClientAPI contains the set of methods on the ServicesClient type.
+type ServicesClientAPI interface {
+ Create(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters servicefabric.ServiceResource) (result servicefabric.ServicesCreateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string) (result servicefabric.ServicesDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string) (result servicefabric.ServiceResource, err error)
+ List(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (result servicefabric.ServiceResourceList, err error)
+ Update(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters servicefabric.ServiceResourceUpdate) (result servicefabric.ServicesUpdateFuture, err error)
+}
+
+var _ ServicesClientAPI = (*servicefabric.ServicesClient)(nil)
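
The *ClientAPI interfaces exist mainly so callers can swap in test doubles. The sketch below is an editorial illustration, not generated code: a hand-written fake satisfying OperationsClientAPI that a unit test could use instead of the real ARM-backed client.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
	"github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/servicefabricapi"
)

// fakeOperations is a hand-written stand-in that satisfies OperationsClientAPI,
// useful in tests that should not hit the real endpoint.
type fakeOperations struct{}

func (fakeOperations) List(ctx context.Context) (servicefabric.OperationListResultPage, error) {
	// Return an empty page; a richer double could wrap canned responses.
	return servicefabric.OperationListResultPage{}, nil
}

func listOperations(c servicefabricapi.OperationsClientAPI) error {
	page, err := c.List(context.Background())
	if err != nil {
		return err
	}
	fmt.Println(len(page.Values()))
	return nil
}

func main() {
	// Production code passes servicefabric.NewOperationsClient(...); tests pass the fake.
	_ = listOperations(fakeOperations{})
}
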
diff --git a/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/services.go b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/services.go
new file mode 100644
index 000000000000..7295ca94514c
--- /dev/null
+++ b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/services.go
@@ -0,0 +1,451 @@
+package servicefabric
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ServicesClient is the Service Fabric Management Client
+type ServicesClient struct {
+ BaseClient
+}
+
+// NewServicesClient creates an instance of the ServicesClient client.
+func NewServicesClient(subscriptionID string) ServicesClient {
+ return NewServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewServicesClientWithBaseURI creates an instance of the ServicesClient client.
+func NewServicesClientWithBaseURI(baseURI string, subscriptionID string) ServicesClient {
+ return ServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create creates or updates a Service Fabric service resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationName - the name of the application resource.
+// serviceName - the name of the service resource in the format of {applicationName}~{serviceName}.
+// parameters - the service resource.
+func (client ServicesClient) Create(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters ServiceResource) (result ServicesCreateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreatePreparer(ctx, resourceGroupName, clusterName, applicationName, serviceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "Create", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client ServicesClient) CreatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters ServiceResource) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationName": autorest.Encode("path", applicationName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serviceName": autorest.Encode("path", serviceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}/services/{serviceName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicesClient) CreateSender(req *http.Request) (future ServicesCreateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client ServicesClient) CreateResponder(resp *http.Response) (result ServiceResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes a Service Fabric service resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationName - the name of the application resource.
+// serviceName - the name of the service resource in the format of {applicationName}~{serviceName}.
+func (client ServicesClient) Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string) (result ServicesDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, clusterName, applicationName, serviceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ServicesClient) DeletePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationName": autorest.Encode("path", applicationName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serviceName": autorest.Encode("path", serviceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}/services/{serviceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicesClient) DeleteSender(req *http.Request) (future ServicesDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets a Service Fabric service resource created or in the process of being created in the Service Fabric
+// application resource.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationName - the name of the application resource.
+// serviceName - the name of the service resource in the format of {applicationName}~{serviceName}.
+func (client ServicesClient) Get(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string) (result ServiceResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, clusterName, applicationName, serviceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ServicesClient) GetPreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationName": autorest.Encode("path", applicationName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serviceName": autorest.Encode("path", serviceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}/services/{serviceName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ServicesClient) GetResponder(resp *http.Response) (result ServiceResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List gets all service resources created or in the process of being created in the Service Fabric application
+// resource.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationName - the name of the application resource.
+func (client ServicesClient) List(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (result ServiceResourceList, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.ListPreparer(ctx, resourceGroupName, clusterName, applicationName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationName": autorest.Encode("path", applicationName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}/services", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ServicesClient) ListResponder(resp *http.Response) (result ServiceResourceList, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Update updates a Service Fabric service resource with the specified name.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// clusterName - the name of the cluster resource.
+// applicationName - the name of the application resource.
+// serviceName - the name of the service resource in the format of {applicationName}~{serviceName}.
+// parameters - the service resource for patch operations.
+func (client ServicesClient) Update(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters ServiceResourceUpdate) (result ServicesUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, clusterName, applicationName, serviceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.UpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "Update", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client ServicesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters ServiceResourceUpdate) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationName": autorest.Encode("path", applicationName),
+ "clusterName": autorest.Encode("path", clusterName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serviceName": autorest.Encode("path", serviceName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applications/{applicationName}/services/{serviceName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client ServicesClient) UpdateSender(req *http.Request) (future ServicesUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client ServicesClient) UpdateResponder(resp *http.Response) (result ServiceResource, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
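
A usage sketch for the long-running Create call above (editorial addition): resource names are placeholders and the ServiceResource payload is left empty for brevity, where a real call would carry the service properties.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	ctx := context.Background()
	client := servicefabric.NewServicesClient("<subscription-id>") // placeholder
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	// Create returns a ServicesCreateFuture because the PUT is a long-running operation.
	// Placeholder names; the empty ServiceResource payload is for brevity only.
	future, err := client.Create(ctx, "myResourceGroup", "myCluster", "myApp", "myApp~myService", servicefabric.ServiceResource{})
	if err != nil {
		panic(err)
	}

	// Poll until the operation reaches a terminal state, then read the final resource.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		panic(err)
	}
	svc, err := future.Result(client)
	if err != nil {
		panic(err)
	}
	fmt.Printf("service created: %+v\n", svc)
}
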
diff --git a/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/version.go b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/version.go
new file mode 100644
index 000000000000..74a7ddcbd499
--- /dev/null
+++ b/services/preview/servicefabric/mgmt/2019-03-01-preview/servicefabric/version.go
@@ -0,0 +1,30 @@
+package servicefabric
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " servicefabric/2019-03-01-preview"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
diff --git a/services/preview/sql/mgmt/2015-05-01-preview/sql/encryptionprotectors.go b/services/preview/sql/mgmt/2015-05-01-preview/sql/encryptionprotectors.go
index f0c3e0785901..4bf6cc3947ab 100644
--- a/services/preview/sql/mgmt/2015-05-01-preview/sql/encryptionprotectors.go
+++ b/services/preview/sql/mgmt/2015-05-01-preview/sql/encryptionprotectors.go
@@ -88,6 +88,7 @@ func (client EncryptionProtectorsClient) CreateOrUpdatePreparer(ctx context.Cont
"api-version": APIVersion,
}
+ parameters.Kind = nil
parameters.Location = nil
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
@@ -319,3 +320,81 @@ func (client EncryptionProtectorsClient) ListByServerComplete(ctx context.Contex
result.page, err = client.ListByServer(ctx, resourceGroupName, serverName)
return
}
+
+// Revalidate revalidates an existing encryption protector.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
+// from the Azure Resource Manager API or the portal.
+// serverName - the name of the server.
+func (client EncryptionProtectorsClient) Revalidate(ctx context.Context, resourceGroupName string, serverName string) (result EncryptionProtectorsRevalidateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionProtectorsClient.Revalidate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.RevalidatePreparer(ctx, resourceGroupName, serverName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.EncryptionProtectorsClient", "Revalidate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.RevalidateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.EncryptionProtectorsClient", "Revalidate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// RevalidatePreparer prepares the Revalidate request.
+func (client EncryptionProtectorsClient) RevalidatePreparer(ctx context.Context, resourceGroupName string, serverName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "encryptionProtectorName": autorest.Encode("path", "current"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-05-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector/{encryptionProtectorName}/revalidate", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RevalidateSender sends the Revalidate request. The method will close the
+// http.Response Body if it receives an error.
+func (client EncryptionProtectorsClient) RevalidateSender(req *http.Request) (future EncryptionProtectorsRevalidateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// RevalidateResponder handles the response to the Revalidate request. The method always
+// closes the http.Response Body.
+func (client EncryptionProtectorsClient) RevalidateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
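
A sketch of driving the new Revalidate long-running operation end to end (editorial, not generated code); the subscription ID, resource group, and server name are placeholders.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2015-05-01-preview/sql"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	ctx := context.Background()
	client := sql.NewEncryptionProtectorsClient("<subscription-id>") // placeholder
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	// Revalidate is a long-running POST against the fixed "current" protector.
	future, err := client.Revalidate(ctx, "myResourceGroup", "myServer")
	if err != nil {
		panic(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		panic(err)
	}
	resp, err := future.Result(client)
	if err != nil {
		panic(err)
	}
	if resp.Response != nil {
		fmt.Println(resp.Status)
	}
}
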
diff --git a/services/preview/sql/mgmt/2015-05-01-preview/sql/models.go b/services/preview/sql/mgmt/2015-05-01-preview/sql/models.go
index cbc26fccfeec..badf8e36aaf7 100644
--- a/services/preview/sql/mgmt/2015-05-01-preview/sql/models.go
+++ b/services/preview/sql/mgmt/2015-05-01-preview/sql/models.go
@@ -1640,7 +1640,7 @@ type DatabaseBlobAuditingPolicyProperties struct {
// IsStorageSecondaryKeyInUse - Specifies whether storageAccountAccessKey value is the storage's secondary key.
IsStorageSecondaryKeyInUse *bool `json:"isStorageSecondaryKeyInUse,omitempty"`
// IsAzureMonitorTargetEnabled - Specifies whether audit events are sent to Azure Monitor.
- // In order to send the events to Azure Monitor, specify 'State' as 'Enabled' and 'IsAzureMonitorTargetEnabled' as true.
+ // In order to send the events to Azure Monitor, specify 'state' as 'Enabled' and 'isAzureMonitorTargetEnabled' as true.
//
// When using REST API to configure auditing, Diagnostic Settings with 'SQLSecurityAuditEvents' diagnostic logs category on the database should be also created.
// Note that for server level audit you should use the 'master' database as {databaseName}.
@@ -3022,7 +3022,7 @@ func (epu *ElasticPoolUpdate) UnmarshalJSON(body []byte) error {
// EncryptionProtector the server encryption protector.
type EncryptionProtector struct {
autorest.Response `json:"-"`
- // Kind - Kind of encryption protector. This is metadata used for the Azure portal experience.
+ // Kind - READ-ONLY; Kind of encryption protector. This is metadata used for the Azure portal experience.
Kind *string `json:"kind,omitempty"`
// Location - READ-ONLY; Resource location.
Location *string `json:"location,omitempty"`
@@ -3039,9 +3039,6 @@ type EncryptionProtector struct {
// MarshalJSON is the custom marshaler for EncryptionProtector.
func (ep EncryptionProtector) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
- if ep.Kind != nil {
- objectMap["kind"] = ep.Kind
- }
if ep.EncryptionProtectorProperties != nil {
objectMap["properties"] = ep.EncryptionProtectorProperties
}
@@ -3307,6 +3304,29 @@ func (future *EncryptionProtectorsCreateOrUpdateFuture) Result(client Encryption
return
}
+// EncryptionProtectorsRevalidateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type EncryptionProtectorsRevalidateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *EncryptionProtectorsRevalidateFuture) Result(client EncryptionProtectorsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.EncryptionProtectorsRevalidateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("sql.EncryptionProtectorsRevalidateFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
// ExportRequest export database parameters.
type ExportRequest struct {
// StorageKeyType - The type of the storage key to use. Possible values include: 'StorageAccessKey', 'SharedAccessKey'
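
Because Kind is now READ-ONLY, the custom marshaler above drops it from request bodies. A small illustration (editorial; the "azurekeyvault" value is a placeholder):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2015-05-01-preview/sql"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Kind is read-only metadata, so the custom MarshalJSON omits it from the payload.
	ep := sql.EncryptionProtector{Kind: to.StringPtr("azurekeyvault")}
	b, err := json.Marshal(ep)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // prints {} — no "kind" key is emitted
}
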
diff --git a/services/preview/sql/mgmt/2015-05-01-preview/sql/sqlapi/interfaces.go b/services/preview/sql/mgmt/2015-05-01-preview/sql/sqlapi/interfaces.go
index f7cab67fbe1a..f41524b79ca6 100644
--- a/services/preview/sql/mgmt/2015-05-01-preview/sql/sqlapi/interfaces.go
+++ b/services/preview/sql/mgmt/2015-05-01-preview/sql/sqlapi/interfaces.go
@@ -273,6 +273,7 @@ type EncryptionProtectorsClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, serverName string, parameters sql.EncryptionProtector) (result sql.EncryptionProtectorsCreateOrUpdateFuture, err error)
Get(ctx context.Context, resourceGroupName string, serverName string) (result sql.EncryptionProtector, err error)
ListByServer(ctx context.Context, resourceGroupName string, serverName string) (result sql.EncryptionProtectorListResultPage, err error)
+ Revalidate(ctx context.Context, resourceGroupName string, serverName string) (result sql.EncryptionProtectorsRevalidateFuture, err error)
}
var _ EncryptionProtectorsClientAPI = (*sql.EncryptionProtectorsClient)(nil)
diff --git a/services/preview/sql/mgmt/2017-03-01-preview/sql/encryptionprotectors.go b/services/preview/sql/mgmt/2017-03-01-preview/sql/encryptionprotectors.go
index f0c3e0785901..4bf6cc3947ab 100644
--- a/services/preview/sql/mgmt/2017-03-01-preview/sql/encryptionprotectors.go
+++ b/services/preview/sql/mgmt/2017-03-01-preview/sql/encryptionprotectors.go
@@ -88,6 +88,7 @@ func (client EncryptionProtectorsClient) CreateOrUpdatePreparer(ctx context.Cont
"api-version": APIVersion,
}
+ parameters.Kind = nil
parameters.Location = nil
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
@@ -319,3 +320,81 @@ func (client EncryptionProtectorsClient) ListByServerComplete(ctx context.Contex
result.page, err = client.ListByServer(ctx, resourceGroupName, serverName)
return
}
+
+// Revalidate revalidates an existing encryption protector.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
+// from the Azure Resource Manager API or the portal.
+// serverName - the name of the server.
+func (client EncryptionProtectorsClient) Revalidate(ctx context.Context, resourceGroupName string, serverName string) (result EncryptionProtectorsRevalidateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EncryptionProtectorsClient.Revalidate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.RevalidatePreparer(ctx, resourceGroupName, serverName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.EncryptionProtectorsClient", "Revalidate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.RevalidateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.EncryptionProtectorsClient", "Revalidate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// RevalidatePreparer prepares the Revalidate request.
+func (client EncryptionProtectorsClient) RevalidatePreparer(ctx context.Context, resourceGroupName string, serverName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "encryptionProtectorName": autorest.Encode("path", "current"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "serverName": autorest.Encode("path", serverName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-05-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/encryptionProtector/{encryptionProtectorName}/revalidate", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RevalidateSender sends the Revalidate request. The method will close the
+// http.Response Body if it receives an error.
+func (client EncryptionProtectorsClient) RevalidateSender(req *http.Request) (future EncryptionProtectorsRevalidateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// RevalidateResponder handles the response to the Revalidate request. The method always
+// closes the http.Response Body.
+func (client EncryptionProtectorsClient) RevalidateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
diff --git a/services/preview/sql/mgmt/2017-03-01-preview/sql/managedinstanceadministrators.go b/services/preview/sql/mgmt/2017-03-01-preview/sql/managedinstanceadministrators.go
new file mode 100644
index 000000000000..ce3fa15903af
--- /dev/null
+++ b/services/preview/sql/mgmt/2017-03-01-preview/sql/managedinstanceadministrators.go
@@ -0,0 +1,413 @@
+package sql
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// ManagedInstanceAdministratorsClient is the client for the Azure SQL Database management API, which provides a
+// RESTful set of web services that interact with Azure SQL Database services to manage your databases. The API
+// enables you to create, retrieve, update, and delete databases.
+type ManagedInstanceAdministratorsClient struct {
+ BaseClient
+}
+
+// NewManagedInstanceAdministratorsClient creates an instance of the ManagedInstanceAdministratorsClient client.
+func NewManagedInstanceAdministratorsClient(subscriptionID string) ManagedInstanceAdministratorsClient {
+ return NewManagedInstanceAdministratorsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewManagedInstanceAdministratorsClientWithBaseURI creates an instance of the ManagedInstanceAdministratorsClient
+// client.
+func NewManagedInstanceAdministratorsClientWithBaseURI(baseURI string, subscriptionID string) ManagedInstanceAdministratorsClient {
+ return ManagedInstanceAdministratorsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a managed instance administrator.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
+// from the Azure Resource Manager API or the portal.
+// managedInstanceName - the name of the managed instance.
+// administratorName - the requested administrator name.
+// parameters - the requested administrator parameters.
+func (client ManagedInstanceAdministratorsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, managedInstanceName string, administratorName string, parameters ManagedInstanceAdministrator) (result ManagedInstanceAdministratorsCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedInstanceAdministratorsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.ManagedInstanceAdministratorProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.ManagedInstanceAdministratorProperties.AdministratorType", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ManagedInstanceAdministratorProperties.Login", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "parameters.ManagedInstanceAdministratorProperties.Sid", Name: validation.Null, Rule: true, Chain: nil},
+ }}}}}); err != nil {
+ return result, validation.NewError("sql.ManagedInstanceAdministratorsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, managedInstanceName, administratorName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ManagedInstanceAdministratorsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, administratorName string, parameters ManagedInstanceAdministrator) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "administratorName": autorest.Encode("path", administratorName),
+ "managedInstanceName": autorest.Encode("path", managedInstanceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/administrators/{administratorName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedInstanceAdministratorsClient) CreateOrUpdateSender(req *http.Request) (future ManagedInstanceAdministratorsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ManagedInstanceAdministratorsClient) CreateOrUpdateResponder(resp *http.Response) (result ManagedInstanceAdministrator, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes a managed instance administrator.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
+// from the Azure Resource Manager API or the portal.
+// managedInstanceName - the name of the managed instance.
+// administratorName - the administrator name.
+func (client ManagedInstanceAdministratorsClient) Delete(ctx context.Context, resourceGroupName string, managedInstanceName string, administratorName string) (result ManagedInstanceAdministratorsDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedInstanceAdministratorsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, resourceGroupName, managedInstanceName, administratorName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.DeleteSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "Delete", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ManagedInstanceAdministratorsClient) DeletePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, administratorName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "administratorName": autorest.Encode("path", administratorName),
+ "managedInstanceName": autorest.Encode("path", managedInstanceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/administrators/{administratorName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedInstanceAdministratorsClient) DeleteSender(req *http.Request) (future ManagedInstanceAdministratorsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ManagedInstanceAdministratorsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets a managed instance administrator.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
+// from the Azure Resource Manager API or the portal.
+// managedInstanceName - the name of the managed instance.
+// administratorName - the administrator name.
+func (client ManagedInstanceAdministratorsClient) Get(ctx context.Context, resourceGroupName string, managedInstanceName string, administratorName string) (result ManagedInstanceAdministrator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedInstanceAdministratorsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, resourceGroupName, managedInstanceName, administratorName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ManagedInstanceAdministratorsClient) GetPreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, administratorName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "administratorName": autorest.Encode("path", administratorName),
+ "managedInstanceName": autorest.Encode("path", managedInstanceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/administrators/{administratorName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedInstanceAdministratorsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ManagedInstanceAdministratorsClient) GetResponder(resp *http.Response) (result ManagedInstanceAdministrator, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByInstance gets a list of managed instance administrators.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
+// from the Azure Resource Manager API or the portal.
+// managedInstanceName - the name of the managed instance.
+func (client ManagedInstanceAdministratorsClient) ListByInstance(ctx context.Context, resourceGroupName string, managedInstanceName string) (result ManagedInstanceAdministratorListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedInstanceAdministratorsClient.ListByInstance")
+ defer func() {
+ sc := -1
+ if result.mialr.Response.Response != nil {
+ sc = result.mialr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByInstanceNextResults
+ req, err := client.ListByInstancePreparer(ctx, resourceGroupName, managedInstanceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "ListByInstance", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByInstanceSender(req)
+ if err != nil {
+ result.mialr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "ListByInstance", resp, "Failure sending request")
+ return
+ }
+
+ result.mialr, err = client.ListByInstanceResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "ListByInstance", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByInstancePreparer prepares the ListByInstance request.
+func (client ManagedInstanceAdministratorsClient) ListByInstancePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managedInstanceName": autorest.Encode("path", managedInstanceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-03-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/administrators", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByInstanceSender sends the ListByInstance request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedInstanceAdministratorsClient) ListByInstanceSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByInstanceResponder handles the response to the ListByInstance request. The method always
+// closes the http.Response Body.
+func (client ManagedInstanceAdministratorsClient) ListByInstanceResponder(resp *http.Response) (result ManagedInstanceAdministratorListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByInstanceNextResults retrieves the next set of results, if any.
+func (client ManagedInstanceAdministratorsClient) listByInstanceNextResults(ctx context.Context, lastResults ManagedInstanceAdministratorListResult) (result ManagedInstanceAdministratorListResult, err error) {
+ req, err := lastResults.managedInstanceAdministratorListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "listByInstanceNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByInstanceSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "listByInstanceNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByInstanceResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsClient", "listByInstanceNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByInstanceComplete enumerates all values, automatically crossing page boundaries as required.
+func (client ManagedInstanceAdministratorsClient) ListByInstanceComplete(ctx context.Context, resourceGroupName string, managedInstanceName string) (result ManagedInstanceAdministratorListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedInstanceAdministratorsClient.ListByInstance")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByInstance(ctx, resourceGroupName, managedInstanceName)
+ return
+}
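
To make the shape of the new client concrete, a hedged end-to-end sketch follows: it creates an administrator and then waits on the returned future. The GUIDs, resource names, and the "ActiveDirectory" administrator name are placeholders; per the validation block in CreateOrUpdate, AdministratorType, Login, and Sid must all be set.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-03-01-preview/sql"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
	uuid "github.com/satori/go.uuid"
)

func main() {
	client := sql.NewManagedInstanceAdministratorsClient("00000000-0000-0000-0000-000000000000")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// Sid is the Azure AD object ID of the administrator; this GUID is a placeholder.
	sid, err := uuid.FromString("11111111-1111-1111-1111-111111111111")
	if err != nil {
		log.Fatal(err)
	}
	params := sql.ManagedInstanceAdministrator{
		ManagedInstanceAdministratorProperties: &sql.ManagedInstanceAdministratorProperties{
			AdministratorType: to.StringPtr("ActiveDirectory"),
			Login:             to.StringPtr("admin@contoso.com"),
			Sid:               &sid,
		},
	}

	ctx := context.Background()
	future, err := client.CreateOrUpdate(ctx, "my-rg", "my-managed-instance", "ActiveDirectory", params)
	if err != nil {
		log.Fatal(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	admin, err := future.Result(client)
	if err != nil {
		log.Fatal(err)
	}
	if admin.ManagedInstanceAdministratorProperties != nil {
		fmt.Println("administrator login:", to.String(admin.Login))
	}
}
```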
diff --git a/services/preview/sql/mgmt/2017-03-01-preview/sql/models.go b/services/preview/sql/mgmt/2017-03-01-preview/sql/models.go
index 8fa47e4e3402..090de37d04fa 100644
--- a/services/preview/sql/mgmt/2017-03-01-preview/sql/models.go
+++ b/services/preview/sql/mgmt/2017-03-01-preview/sql/models.go
@@ -2179,7 +2179,7 @@ type DatabaseBlobAuditingPolicyProperties struct {
// IsStorageSecondaryKeyInUse - Specifies whether storageAccountAccessKey value is the storage's secondary key.
IsStorageSecondaryKeyInUse *bool `json:"isStorageSecondaryKeyInUse,omitempty"`
// IsAzureMonitorTargetEnabled - Specifies whether audit events are sent to Azure Monitor.
- // In order to send the events to Azure Monitor, specify 'State' as 'Enabled' and 'IsAzureMonitorTargetEnabled' as true.
+ // In order to send the events to Azure Monitor, specify 'state' as 'Enabled' and 'isAzureMonitorTargetEnabled' as true.
//
// When using REST API to configure auditing, Diagnostic Settings with 'SQLSecurityAuditEvents' diagnostic logs category on the database should be also created.
// Note that for server level audit you should use the 'master' database as {databaseName}.
@@ -4205,7 +4205,7 @@ func (epu *ElasticPoolUpdate) UnmarshalJSON(body []byte) error {
// EncryptionProtector the server encryption protector.
type EncryptionProtector struct {
autorest.Response `json:"-"`
- // Kind - Kind of encryption protector. This is metadata used for the Azure portal experience.
+ // Kind - READ-ONLY; Kind of encryption protector. This is metadata used for the Azure portal experience.
Kind *string `json:"kind,omitempty"`
// Location - READ-ONLY; Resource location.
Location *string `json:"location,omitempty"`
@@ -4222,9 +4222,6 @@ type EncryptionProtector struct {
// MarshalJSON is the custom marshaler for EncryptionProtector.
func (ep EncryptionProtector) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
- if ep.Kind != nil {
- objectMap["kind"] = ep.Kind
- }
if ep.EncryptionProtectorProperties != nil {
objectMap["properties"] = ep.EncryptionProtectorProperties
}
@@ -4490,6 +4487,29 @@ func (future *EncryptionProtectorsCreateOrUpdateFuture) Result(client Encryption
return
}
+// EncryptionProtectorsRevalidateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type EncryptionProtectorsRevalidateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *EncryptionProtectorsRevalidateFuture) Result(client EncryptionProtectorsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.EncryptionProtectorsRevalidateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("sql.EncryptionProtectorsRevalidateFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
// ExportRequest export database parameters.
type ExportRequest struct {
// StorageKeyType - The type of the storage key to use. Possible values include: 'StorageAccessKey', 'SharedAccessKey'
@@ -4654,7 +4674,7 @@ type ExtendedDatabaseBlobAuditingPolicyProperties struct {
// IsStorageSecondaryKeyInUse - Specifies whether storageAccountAccessKey value is the storage's secondary key.
IsStorageSecondaryKeyInUse *bool `json:"isStorageSecondaryKeyInUse,omitempty"`
// IsAzureMonitorTargetEnabled - Specifies whether audit events are sent to Azure Monitor.
- // In order to send the events to Azure Monitor, specify 'State' as 'Enabled' and 'IsAzureMonitorTargetEnabled' as true.
+ // In order to send the events to Azure Monitor, specify 'state' as 'Enabled' and 'isAzureMonitorTargetEnabled' as true.
//
// When using REST API to configure auditing, Diagnostic Settings with 'SQLSecurityAuditEvents' diagnostic logs category on the database should be also created.
// Note that for server level audit you should use the 'master' database as {databaseName}.
@@ -4844,7 +4864,7 @@ type ExtendedServerBlobAuditingPolicyProperties struct {
// IsStorageSecondaryKeyInUse - Specifies whether storageAccountAccessKey value is the storage's secondary key.
IsStorageSecondaryKeyInUse *bool `json:"isStorageSecondaryKeyInUse,omitempty"`
// IsAzureMonitorTargetEnabled - Specifies whether audit events are sent to Azure Monitor.
- // In order to send the events to Azure Monitor, specify 'State' as 'Enabled' and 'IsAzureMonitorTargetEnabled' as true.
+ // In order to send the events to Azure Monitor, specify 'state' as 'Enabled' and 'isAzureMonitorTargetEnabled' as true.
//
// When using REST API to configure auditing, Diagnostic Settings with 'SQLSecurityAuditEvents' diagnostic logs category on the database should be also created.
// Note that for server level audit you should use the 'master' database as {databaseName}.
@@ -8608,6 +8628,290 @@ func (mi *ManagedInstance) UnmarshalJSON(body []byte) error {
return nil
}
+// ManagedInstanceAdministrator an Azure SQL managed instance administrator.
+type ManagedInstanceAdministrator struct {
+ autorest.Response `json:"-"`
+ // ManagedInstanceAdministratorProperties - Resource properties.
+ *ManagedInstanceAdministratorProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource ID.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ManagedInstanceAdministrator.
+func (mia ManagedInstanceAdministrator) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if mia.ManagedInstanceAdministratorProperties != nil {
+ objectMap["properties"] = mia.ManagedInstanceAdministratorProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for ManagedInstanceAdministrator struct.
+func (mia *ManagedInstanceAdministrator) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var managedInstanceAdministratorProperties ManagedInstanceAdministratorProperties
+ err = json.Unmarshal(*v, &managedInstanceAdministratorProperties)
+ if err != nil {
+ return err
+ }
+ mia.ManagedInstanceAdministratorProperties = &managedInstanceAdministratorProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ mia.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mia.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ mia.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
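As an aside on what the custom marshaler and unmarshaler above actually do: the resource properties are flattened in Go but travel nested under a "properties" envelope on the wire, and the read-only id/name/type fields are dropped when marshaling. A small, hedged round-trip sketch (placeholder GUID and values):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-03-01-preview/sql"
	"github.com/Azure/go-autorest/autorest/to"
	uuid "github.com/satori/go.uuid"
)

func main() {
	sid, _ := uuid.FromString("11111111-1111-1111-1111-111111111111")
	mia := sql.ManagedInstanceAdministrator{
		ManagedInstanceAdministratorProperties: &sql.ManagedInstanceAdministratorProperties{
			AdministratorType: to.StringPtr("ActiveDirectory"),
			Login:             to.StringPtr("admin@contoso.com"),
			Sid:               &sid,
		},
	}

	// MarshalJSON nests everything under "properties" and omits the read-only fields.
	b, err := json.Marshal(mia)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // roughly: {"properties":{"administratorType":"ActiveDirectory","login":"admin@contoso.com","sid":"..."}}

	// UnmarshalJSON reverses the flattening.
	var decoded sql.ManagedInstanceAdministrator
	if err := json.Unmarshal(b, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println("decoded login:", to.String(decoded.Login))
}
```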
+// ManagedInstanceAdministratorListResult a list of managed instance administrators.
+type ManagedInstanceAdministratorListResult struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; Array of results.
+ Value *[]ManagedInstanceAdministrator `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Link to retrieve next page of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ManagedInstanceAdministratorListResultIterator provides access to a complete listing of
+// ManagedInstanceAdministrator values.
+type ManagedInstanceAdministratorListResultIterator struct {
+ i int
+ page ManagedInstanceAdministratorListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ManagedInstanceAdministratorListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedInstanceAdministratorListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ManagedInstanceAdministratorListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ManagedInstanceAdministratorListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ManagedInstanceAdministratorListResultIterator) Response() ManagedInstanceAdministratorListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ManagedInstanceAdministratorListResultIterator) Value() ManagedInstanceAdministrator {
+ if !iter.page.NotDone() {
+ return ManagedInstanceAdministrator{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// NewManagedInstanceAdministratorListResultIterator creates a new instance of the ManagedInstanceAdministratorListResultIterator type.
+func NewManagedInstanceAdministratorListResultIterator(page ManagedInstanceAdministratorListResultPage) ManagedInstanceAdministratorListResultIterator {
+ return ManagedInstanceAdministratorListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (mialr ManagedInstanceAdministratorListResult) IsEmpty() bool {
+ return mialr.Value == nil || len(*mialr.Value) == 0
+}
+
+// managedInstanceAdministratorListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (mialr ManagedInstanceAdministratorListResult) managedInstanceAdministratorListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if mialr.NextLink == nil || len(to.String(mialr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(mialr.NextLink)))
+}
+
+// ManagedInstanceAdministratorListResultPage contains a page of ManagedInstanceAdministrator values.
+type ManagedInstanceAdministratorListResultPage struct {
+ fn func(context.Context, ManagedInstanceAdministratorListResult) (ManagedInstanceAdministratorListResult, error)
+ mialr ManagedInstanceAdministratorListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ManagedInstanceAdministratorListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedInstanceAdministratorListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.mialr)
+ if err != nil {
+ return err
+ }
+ page.mialr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ManagedInstanceAdministratorListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ManagedInstanceAdministratorListResultPage) NotDone() bool {
+ return !page.mialr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ManagedInstanceAdministratorListResultPage) Response() ManagedInstanceAdministratorListResult {
+ return page.mialr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ManagedInstanceAdministratorListResultPage) Values() []ManagedInstanceAdministrator {
+ if page.mialr.IsEmpty() {
+ return nil
+ }
+ return *page.mialr.Value
+}
+
+// NewManagedInstanceAdministratorListResultPage creates a new instance of the ManagedInstanceAdministratorListResultPage type.
+func NewManagedInstanceAdministratorListResultPage(getNextPage func(context.Context, ManagedInstanceAdministratorListResult) (ManagedInstanceAdministratorListResult, error)) ManagedInstanceAdministratorListResultPage {
+ return ManagedInstanceAdministratorListResultPage{fn: getNextPage}
+}
+
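The iterator and page types above are most conveniently consumed through ListByInstanceComplete, which crosses page boundaries transparently. A minimal sketch (placeholder names, authorizer from environment):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-03-01-preview/sql"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	client := sql.NewManagedInstanceAdministratorsClient("00000000-0000-0000-0000-000000000000")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()
	// ListByInstanceComplete returns an iterator that fetches additional pages as needed.
	iter, err := client.ListByInstanceComplete(ctx, "my-rg", "my-managed-instance")
	if err != nil {
		log.Fatal(err)
	}
	for iter.NotDone() {
		admin := iter.Value()
		fmt.Println("administrator:", to.String(admin.Name))
		if err := iter.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```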
+// ManagedInstanceAdministratorProperties the properties of a managed instance administrator.
+type ManagedInstanceAdministratorProperties struct {
+ // AdministratorType - Type of the managed instance administrator.
+ AdministratorType *string `json:"administratorType,omitempty"`
+ // Login - Login name of the managed instance administrator.
+ Login *string `json:"login,omitempty"`
+ // Sid - SID (object ID) of the managed instance administrator.
+ Sid *uuid.UUID `json:"sid,omitempty"`
+ // TenantID - Tenant ID of the managed instance administrator.
+ TenantID *uuid.UUID `json:"tenantId,omitempty"`
+}
+
+// ManagedInstanceAdministratorsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
+// results of a long-running operation.
+type ManagedInstanceAdministratorsCreateOrUpdateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ManagedInstanceAdministratorsCreateOrUpdateFuture) Result(client ManagedInstanceAdministratorsClient) (mia ManagedInstanceAdministrator, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("sql.ManagedInstanceAdministratorsCreateOrUpdateFuture")
+ return
+ }
+ sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ if mia.Response.Response, err = future.GetResult(sender); err == nil && mia.Response.Response.StatusCode != http.StatusNoContent {
+ mia, err = client.CreateOrUpdateResponder(mia.Response.Response)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsCreateOrUpdateFuture", "Result", mia.Response.Response, "Failure responding to request")
+ }
+ }
+ return
+}
+
+// ManagedInstanceAdministratorsDeleteFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ManagedInstanceAdministratorsDeleteFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ManagedInstanceAdministratorsDeleteFuture) Result(client ManagedInstanceAdministratorsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceAdministratorsDeleteFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("sql.ManagedInstanceAdministratorsDeleteFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
// ManagedInstanceListResult a list of managed instances.
type ManagedInstanceListResult struct {
autorest.Response `json:"-"`
@@ -10912,7 +11216,7 @@ type ServerBlobAuditingPolicyProperties struct {
// IsStorageSecondaryKeyInUse - Specifies whether storageAccountAccessKey value is the storage's secondary key.
IsStorageSecondaryKeyInUse *bool `json:"isStorageSecondaryKeyInUse,omitempty"`
// IsAzureMonitorTargetEnabled - Specifies whether audit events are sent to Azure Monitor.
- // In order to send the events to Azure Monitor, specify 'State' as 'Enabled' and 'IsAzureMonitorTargetEnabled' as true.
+ // In order to send the events to Azure Monitor, specify 'state' as 'Enabled' and 'isAzureMonitorTargetEnabled' as true.
//
// When using REST API to configure auditing, Diagnostic Settings with 'SQLSecurityAuditEvents' diagnostic logs category on the database should be also created.
// Note that for server level audit you should use the 'master' database as {databaseName}.
diff --git a/services/preview/sql/mgmt/2017-03-01-preview/sql/sqlapi/interfaces.go b/services/preview/sql/mgmt/2017-03-01-preview/sql/sqlapi/interfaces.go
index 08673f8958ab..5813776a86e3 100644
--- a/services/preview/sql/mgmt/2017-03-01-preview/sql/sqlapi/interfaces.go
+++ b/services/preview/sql/mgmt/2017-03-01-preview/sql/sqlapi/interfaces.go
@@ -269,6 +269,7 @@ type EncryptionProtectorsClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, serverName string, parameters sql.EncryptionProtector) (result sql.EncryptionProtectorsCreateOrUpdateFuture, err error)
Get(ctx context.Context, resourceGroupName string, serverName string) (result sql.EncryptionProtector, err error)
ListByServer(ctx context.Context, resourceGroupName string, serverName string) (result sql.EncryptionProtectorListResultPage, err error)
+ Revalidate(ctx context.Context, resourceGroupName string, serverName string) (result sql.EncryptionProtectorsRevalidateFuture, err error)
}
var _ EncryptionProtectorsClientAPI = (*sql.EncryptionProtectorsClient)(nil)
@@ -577,6 +578,16 @@ type SensitivityLabelsClientAPI interface {
var _ SensitivityLabelsClientAPI = (*sql.SensitivityLabelsClient)(nil)
+// ManagedInstanceAdministratorsClientAPI contains the set of methods on the ManagedInstanceAdministratorsClient type.
+type ManagedInstanceAdministratorsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, managedInstanceName string, administratorName string, parameters sql.ManagedInstanceAdministrator) (result sql.ManagedInstanceAdministratorsCreateOrUpdateFuture, err error)
+ Delete(ctx context.Context, resourceGroupName string, managedInstanceName string, administratorName string) (result sql.ManagedInstanceAdministratorsDeleteFuture, err error)
+ Get(ctx context.Context, resourceGroupName string, managedInstanceName string, administratorName string) (result sql.ManagedInstanceAdministrator, err error)
+ ListByInstance(ctx context.Context, resourceGroupName string, managedInstanceName string) (result sql.ManagedInstanceAdministratorListResultPage, err error)
+}
+
+var _ ManagedInstanceAdministratorsClientAPI = (*sql.ManagedInstanceAdministratorsClient)(nil)
+
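Because the compile-time assertion above guarantees the generated client satisfies ManagedInstanceAdministratorsClientAPI, consuming code can depend on the interface and substitute a test double. A hedged sketch with a hypothetical fakeAdministrators type:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-03-01-preview/sql"
	"github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-03-01-preview/sql/sqlapi"
	"github.com/Azure/go-autorest/autorest/to"
)

// fakeAdministrators is a hypothetical test double that satisfies the new interface.
type fakeAdministrators struct{}

func (fakeAdministrators) CreateOrUpdate(ctx context.Context, resourceGroupName, managedInstanceName, administratorName string, parameters sql.ManagedInstanceAdministrator) (sql.ManagedInstanceAdministratorsCreateOrUpdateFuture, error) {
	return sql.ManagedInstanceAdministratorsCreateOrUpdateFuture{}, nil
}

func (fakeAdministrators) Delete(ctx context.Context, resourceGroupName, managedInstanceName, administratorName string) (sql.ManagedInstanceAdministratorsDeleteFuture, error) {
	return sql.ManagedInstanceAdministratorsDeleteFuture{}, nil
}

func (fakeAdministrators) Get(ctx context.Context, resourceGroupName, managedInstanceName, administratorName string) (sql.ManagedInstanceAdministrator, error) {
	return sql.ManagedInstanceAdministrator{Name: to.StringPtr(administratorName)}, nil
}

func (fakeAdministrators) ListByInstance(ctx context.Context, resourceGroupName, managedInstanceName string) (sql.ManagedInstanceAdministratorListResultPage, error) {
	return sql.ManagedInstanceAdministratorListResultPage{}, nil
}

// describeAdmin depends only on the interface, so the real client and the fake are interchangeable.
func describeAdmin(api sqlapi.ManagedInstanceAdministratorsClientAPI) {
	admin, _ := api.Get(context.Background(), "my-rg", "my-managed-instance", "ActiveDirectory")
	fmt.Println("admin resource name:", to.String(admin.Name))
}

func main() {
	describeAdmin(fakeAdministrators{})
}
```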
// ServerAutomaticTuningClientAPI contains the set of methods on the ServerAutomaticTuningClient type.
type ServerAutomaticTuningClientAPI interface {
Get(ctx context.Context, resourceGroupName string, serverName string) (result sql.ServerAutomaticTuning, err error)
diff --git a/services/preview/sql/mgmt/2017-10-01-preview/sql/managedinstanceencryptionprotectors.go b/services/preview/sql/mgmt/2017-10-01-preview/sql/managedinstanceencryptionprotectors.go
index cf433d5a1242..4f0f12f184f1 100644
--- a/services/preview/sql/mgmt/2017-10-01-preview/sql/managedinstanceencryptionprotectors.go
+++ b/services/preview/sql/mgmt/2017-10-01-preview/sql/managedinstanceencryptionprotectors.go
@@ -321,3 +321,81 @@ func (client ManagedInstanceEncryptionProtectorsClient) ListByInstanceComplete(c
result.page, err = client.ListByInstance(ctx, resourceGroupName, managedInstanceName)
return
}
+
+// Revalidate revalidates an existing encryption protector.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
+// from the Azure Resource Manager API or the portal.
+// managedInstanceName - the name of the managed instance.
+func (client ManagedInstanceEncryptionProtectorsClient) Revalidate(ctx context.Context, resourceGroupName string, managedInstanceName string) (result ManagedInstanceEncryptionProtectorsRevalidateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ManagedInstanceEncryptionProtectorsClient.Revalidate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.RevalidatePreparer(ctx, resourceGroupName, managedInstanceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceEncryptionProtectorsClient", "Revalidate", nil, "Failure preparing request")
+ return
+ }
+
+ result, err = client.RevalidateSender(req)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceEncryptionProtectorsClient", "Revalidate", result.Response(), "Failure sending request")
+ return
+ }
+
+ return
+}
+
+// RevalidatePreparer prepares the Revalidate request.
+func (client ManagedInstanceEncryptionProtectorsClient) RevalidatePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "encryptionProtectorName": autorest.Encode("path", "current"),
+ "managedInstanceName": autorest.Encode("path", managedInstanceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-10-01-preview"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/encryptionProtector/{encryptionProtectorName}/revalidate", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// RevalidateSender sends the Revalidate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagedInstanceEncryptionProtectorsClient) RevalidateSender(req *http.Request) (future ManagedInstanceEncryptionProtectorsRevalidateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ var resp *http.Response
+ resp, err = autorest.SendWithSender(client, req, sd...)
+ if err != nil {
+ return
+ }
+ future.Future, err = azure.NewFutureFromResponse(resp)
+ return
+}
+
+// RevalidateResponder handles the response to the Revalidate request. The method always
+// closes the http.Response Body.
+func (client ManagedInstanceEncryptionProtectorsClient) RevalidateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
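Usage mirrors the server-level Revalidate shown earlier, only against the managed-instance client from the 2017-10-01-preview package; a condensed, hedged sketch with placeholder names:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/sql/mgmt/2017-10-01-preview/sql"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	client := sql.NewManagedInstanceEncryptionProtectorsClient("00000000-0000-0000-0000-000000000000")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()
	future, err := client.Revalidate(ctx, "my-rg", "my-managed-instance")
	if err != nil {
		log.Fatal(err)
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	resp, err := future.Result(client)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("revalidate status:", resp.Status)
}
```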
diff --git a/services/preview/sql/mgmt/2017-10-01-preview/sql/models.go b/services/preview/sql/mgmt/2017-10-01-preview/sql/models.go
index 47a9a57126e2..b015916af93e 100644
--- a/services/preview/sql/mgmt/2017-10-01-preview/sql/models.go
+++ b/services/preview/sql/mgmt/2017-10-01-preview/sql/models.go
@@ -3477,6 +3477,29 @@ func (future *ManagedInstanceEncryptionProtectorsCreateOrUpdateFuture) Result(cl
return
}
+// ManagedInstanceEncryptionProtectorsRevalidateFuture an abstraction for monitoring and retrieving the
+// results of a long-running operation.
+type ManagedInstanceEncryptionProtectorsRevalidateFuture struct {
+ azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ManagedInstanceEncryptionProtectorsRevalidateFuture) Result(client ManagedInstanceEncryptionProtectorsClient) (ar autorest.Response, err error) {
+ var done bool
+ done, err = future.DoneWithContext(context.Background(), client)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "sql.ManagedInstanceEncryptionProtectorsRevalidateFuture", "Result", future.Response(), "Polling failure")
+ return
+ }
+ if !done {
+ err = azure.NewAsyncOpIncompleteError("sql.ManagedInstanceEncryptionProtectorsRevalidateFuture")
+ return
+ }
+ ar.Response = future.Response()
+ return
+}
+
// ManagedInstanceFamilyCapability the managed server family capability.
type ManagedInstanceFamilyCapability struct {
// Name - READ-ONLY; Family name.
diff --git a/services/preview/sql/mgmt/2017-10-01-preview/sql/sqlapi/interfaces.go b/services/preview/sql/mgmt/2017-10-01-preview/sql/sqlapi/interfaces.go
index aa93c1ca8a3f..9b696e175f46 100644
--- a/services/preview/sql/mgmt/2017-10-01-preview/sql/sqlapi/interfaces.go
+++ b/services/preview/sql/mgmt/2017-10-01-preview/sql/sqlapi/interfaces.go
@@ -164,6 +164,7 @@ type ManagedInstanceEncryptionProtectorsClientAPI interface {
CreateOrUpdate(ctx context.Context, resourceGroupName string, managedInstanceName string, parameters sql.ManagedInstanceEncryptionProtector) (result sql.ManagedInstanceEncryptionProtectorsCreateOrUpdateFuture, err error)
Get(ctx context.Context, resourceGroupName string, managedInstanceName string) (result sql.ManagedInstanceEncryptionProtector, err error)
ListByInstance(ctx context.Context, resourceGroupName string, managedInstanceName string) (result sql.ManagedInstanceEncryptionProtectorListResultPage, err error)
+ Revalidate(ctx context.Context, resourceGroupName string, managedInstanceName string) (result sql.ManagedInstanceEncryptionProtectorsRevalidateFuture, err error)
}
var _ ManagedInstanceEncryptionProtectorsClientAPI = (*sql.ManagedInstanceEncryptionProtectorsClient)(nil)
diff --git a/services/recoveryservices/mgmt/2016-12-01/backup/models.go b/services/recoveryservices/mgmt/2016-12-01/backup/models.go
index 4d929d2c726c..9ce8a646a248 100644
--- a/services/recoveryservices/mgmt/2016-12-01/backup/models.go
+++ b/services/recoveryservices/mgmt/2016-12-01/backup/models.go
@@ -1678,6 +1678,16 @@ type AzureFileshareProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -1731,6 +1741,21 @@ func (afpi AzureFileshareProtectedItem) MarshalJSON() ([]byte, error) {
if afpi.CreateMode != "" {
objectMap["createMode"] = afpi.CreateMode
}
+ if afpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = afpi.DeferredDeleteTimeInUTC
+ }
+ if afpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = afpi.IsScheduledForDeferredDelete
+ }
+ if afpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = afpi.DeferredDeleteTimeRemaining
+ }
+ if afpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = afpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if afpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = afpi.IsRehydrate
+ }
if afpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = afpi.ProtectedItemType
}
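
For context on how these new soft-delete fields surface to callers, here is a small sketch that inspects them on a protected item. The item is constructed locally for illustration (in practice it would come from a protected-items listing), and the time-remaining string is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2016-12-01/backup"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Locally constructed stand-in for an item returned by the service.
	item := backup.AzureFileshareProtectedItem{
		BackupSetName:                    to.StringPtr("my-share-backup"),
		IsScheduledForDeferredDelete:     to.BoolPtr(true),
		DeferredDeleteTimeRemaining:      to.StringPtr("13.23:59:59"), // placeholder duration string
		IsDeferredDeleteScheduleUpcoming: to.BoolPtr(false),
	}

	if item.IsScheduledForDeferredDelete != nil && *item.IsScheduledForDeferredDelete {
		fmt.Printf("%s is in deferred delete; %s remaining before it is permanently removed\n",
			to.String(item.BackupSetName), to.String(item.DeferredDeleteTimeRemaining))
	}
}
```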
@@ -2481,6 +2506,16 @@ type AzureIaaSClassicComputeVMProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -2543,6 +2578,21 @@ func (aisccvpi AzureIaaSClassicComputeVMProtectedItem) MarshalJSON() ([]byte, er
if aisccvpi.CreateMode != "" {
objectMap["createMode"] = aisccvpi.CreateMode
}
+ if aisccvpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = aisccvpi.DeferredDeleteTimeInUTC
+ }
+ if aisccvpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = aisccvpi.IsScheduledForDeferredDelete
+ }
+ if aisccvpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = aisccvpi.DeferredDeleteTimeRemaining
+ }
+ if aisccvpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = aisccvpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if aisccvpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = aisccvpi.IsRehydrate
+ }
if aisccvpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = aisccvpi.ProtectedItemType
}
@@ -2927,6 +2977,16 @@ type AzureIaaSComputeVMProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -2989,6 +3049,21 @@ func (aiscvpi AzureIaaSComputeVMProtectedItem) MarshalJSON() ([]byte, error) {
if aiscvpi.CreateMode != "" {
objectMap["createMode"] = aiscvpi.CreateMode
}
+ if aiscvpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = aiscvpi.DeferredDeleteTimeInUTC
+ }
+ if aiscvpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = aiscvpi.IsScheduledForDeferredDelete
+ }
+ if aiscvpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = aiscvpi.DeferredDeleteTimeRemaining
+ }
+ if aiscvpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = aiscvpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if aiscvpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = aiscvpi.IsRehydrate
+ }
if aiscvpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = aiscvpi.ProtectedItemType
}
@@ -3132,6 +3207,16 @@ type AzureIaaSVMProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -3235,6 +3320,21 @@ func (aispi AzureIaaSVMProtectedItem) MarshalJSON() ([]byte, error) {
if aispi.CreateMode != "" {
objectMap["createMode"] = aispi.CreateMode
}
+ if aispi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = aispi.DeferredDeleteTimeInUTC
+ }
+ if aispi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = aispi.IsScheduledForDeferredDelete
+ }
+ if aispi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = aispi.DeferredDeleteTimeRemaining
+ }
+ if aispi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = aispi.IsDeferredDeleteScheduleUpcoming
+ }
+ if aispi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = aispi.IsRehydrate
+ }
if aispi.ProtectedItemType != "" {
objectMap["protectedItemType"] = aispi.ProtectedItemType
}
@@ -3766,6 +3866,16 @@ type AzureSQLProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -3807,6 +3917,21 @@ func (aspi AzureSQLProtectedItem) MarshalJSON() ([]byte, error) {
if aspi.CreateMode != "" {
objectMap["createMode"] = aspi.CreateMode
}
+ if aspi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = aspi.DeferredDeleteTimeInUTC
+ }
+ if aspi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = aspi.IsScheduledForDeferredDelete
+ }
+ if aspi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = aspi.DeferredDeleteTimeRemaining
+ }
+ if aspi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = aspi.IsDeferredDeleteScheduleUpcoming
+ }
+ if aspi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = aspi.IsRehydrate
+ }
if aspi.ProtectedItemType != "" {
objectMap["protectedItemType"] = aspi.ProtectedItemType
}
@@ -4866,6 +4991,16 @@ type AzureVMWorkloadProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -4979,6 +5114,21 @@ func (avwpi AzureVMWorkloadProtectedItem) MarshalJSON() ([]byte, error) {
if avwpi.CreateMode != "" {
objectMap["createMode"] = avwpi.CreateMode
}
+ if avwpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = avwpi.DeferredDeleteTimeInUTC
+ }
+ if avwpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = avwpi.IsScheduledForDeferredDelete
+ }
+ if avwpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = avwpi.DeferredDeleteTimeRemaining
+ }
+ if avwpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = avwpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if avwpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = avwpi.IsRehydrate
+ }
if avwpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = avwpi.ProtectedItemType
}
@@ -5353,6 +5503,16 @@ type AzureVMWorkloadSAPAseDatabaseProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -5421,6 +5581,21 @@ func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) MarshalJSON() ([]byte
if avwsadpi.CreateMode != "" {
objectMap["createMode"] = avwsadpi.CreateMode
}
+ if avwsadpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = avwsadpi.DeferredDeleteTimeInUTC
+ }
+ if avwsadpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = avwsadpi.IsScheduledForDeferredDelete
+ }
+ if avwsadpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = avwsadpi.DeferredDeleteTimeRemaining
+ }
+ if avwsadpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = avwsadpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if avwsadpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = avwsadpi.IsRehydrate
+ }
if avwsadpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = avwsadpi.ProtectedItemType
}
@@ -6090,6 +6265,16 @@ type AzureVMWorkloadSAPHanaDatabaseProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -6158,6 +6343,21 @@ func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) MarshalJSON() ([]byt
if avwshdpi.CreateMode != "" {
objectMap["createMode"] = avwshdpi.CreateMode
}
+ if avwshdpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = avwshdpi.DeferredDeleteTimeInUTC
+ }
+ if avwshdpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = avwshdpi.IsScheduledForDeferredDelete
+ }
+ if avwshdpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = avwshdpi.DeferredDeleteTimeRemaining
+ }
+ if avwshdpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = avwshdpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if avwshdpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = avwshdpi.IsRehydrate
+ }
if avwshdpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = avwshdpi.ProtectedItemType
}
@@ -6985,6 +7185,16 @@ type AzureVMWorkloadSQLDatabaseProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -7053,6 +7263,21 @@ func (avwsdpi AzureVMWorkloadSQLDatabaseProtectedItem) MarshalJSON() ([]byte, er
if avwsdpi.CreateMode != "" {
objectMap["createMode"] = avwsdpi.CreateMode
}
+ if avwsdpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = avwsdpi.DeferredDeleteTimeInUTC
+ }
+ if avwsdpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = avwsdpi.IsScheduledForDeferredDelete
+ }
+ if avwsdpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = avwsdpi.DeferredDeleteTimeRemaining
+ }
+ if avwsdpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = avwsdpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if avwsdpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = avwsdpi.IsRehydrate
+ }
if avwsdpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = avwsdpi.ProtectedItemType
}
@@ -9827,8 +10052,6 @@ type DPMProtectedItem struct {
BackupEngineName *string `json:"backupEngineName,omitempty"`
// ProtectionState - Protection state of the backup engine. Possible values include: 'ProtectedItemStateInvalid', 'ProtectedItemStateIRPending', 'ProtectedItemStateProtected', 'ProtectedItemStateProtectionError', 'ProtectedItemStateProtectionStopped', 'ProtectedItemStateProtectionPaused'
ProtectionState ProtectedItemState `json:"protectionState,omitempty"`
- // IsScheduledForDeferredDelete - To check if backup item is scheduled for deferred delete
- IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
// ExtendedInfo - Extended info of the backup item.
ExtendedInfo *DPMProtectedItemExtendedInfo `json:"extendedInfo,omitempty"`
// BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup'
@@ -9847,6 +10070,16 @@ type DPMProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -9864,9 +10097,6 @@ func (dpi DPMProtectedItem) MarshalJSON() ([]byte, error) {
if dpi.ProtectionState != "" {
objectMap["protectionState"] = dpi.ProtectionState
}
- if dpi.IsScheduledForDeferredDelete != nil {
- objectMap["isScheduledForDeferredDelete"] = dpi.IsScheduledForDeferredDelete
- }
if dpi.ExtendedInfo != nil {
objectMap["extendedInfo"] = dpi.ExtendedInfo
}
@@ -9894,6 +10124,21 @@ func (dpi DPMProtectedItem) MarshalJSON() ([]byte, error) {
if dpi.CreateMode != "" {
objectMap["createMode"] = dpi.CreateMode
}
+ if dpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = dpi.DeferredDeleteTimeInUTC
+ }
+ if dpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = dpi.IsScheduledForDeferredDelete
+ }
+ if dpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = dpi.DeferredDeleteTimeRemaining
+ }
+ if dpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = dpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if dpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = dpi.IsRehydrate
+ }
if dpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = dpi.ProtectedItemType
}
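
> Note (not part of the generated diff): although the hunks above show `IsScheduledForDeferredDelete` being removed from `DPMProtectedItem` and then re-added further down, the property stays on the struct — it simply moves into the shared soft-delete block — so existing call sites keep compiling. A minimal sketch of that, assuming the 2016-12-01 `backup` package path:

```go
// Illustrative only — not part of the diff. Assumes the 2016-12-01 backup package;
// adjust the version segment to whichever API version you consume.
package main

import "github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2016-12-01/backup"

func main() {
	scheduled := true
	// This assignment compiled before the change and still compiles after it:
	// the property now lives in the shared soft-delete group of the struct.
	dpi := backup.DPMProtectedItem{IsScheduledForDeferredDelete: &scheduled}
	_ = dpi
}
```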
@@ -10693,6 +10938,16 @@ type GenericProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -10743,6 +10998,21 @@ func (gpi GenericProtectedItem) MarshalJSON() ([]byte, error) {
if gpi.CreateMode != "" {
objectMap["createMode"] = gpi.CreateMode
}
+ if gpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = gpi.DeferredDeleteTimeInUTC
+ }
+ if gpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = gpi.IsScheduledForDeferredDelete
+ }
+ if gpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = gpi.DeferredDeleteTimeRemaining
+ }
+ if gpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = gpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if gpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = gpi.IsRehydrate
+ }
if gpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = gpi.ProtectedItemType
}
@@ -12343,8 +12613,6 @@ type MabFileFolderProtectedItem struct {
LastBackupStatus *string `json:"lastBackupStatus,omitempty"`
// ProtectionState - Protected, ProtectionStopped, IRPending or ProtectionError
ProtectionState *string `json:"protectionState,omitempty"`
- // IsScheduledForDeferredDelete - Specifies if the item is scheduled for deferred deletion.
- IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
// DeferredDeleteSyncTimeInUTC - Sync time for deferred deletion.
DeferredDeleteSyncTimeInUTC *int64 `json:"deferredDeleteSyncTimeInUTC,omitempty"`
// ExtendedInfo - Additional information with this backup item.
@@ -12365,6 +12633,16 @@ type MabFileFolderProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -12385,9 +12663,6 @@ func (mffpi MabFileFolderProtectedItem) MarshalJSON() ([]byte, error) {
if mffpi.ProtectionState != nil {
objectMap["protectionState"] = mffpi.ProtectionState
}
- if mffpi.IsScheduledForDeferredDelete != nil {
- objectMap["isScheduledForDeferredDelete"] = mffpi.IsScheduledForDeferredDelete
- }
if mffpi.DeferredDeleteSyncTimeInUTC != nil {
objectMap["deferredDeleteSyncTimeInUTC"] = mffpi.DeferredDeleteSyncTimeInUTC
}
@@ -12418,6 +12693,21 @@ func (mffpi MabFileFolderProtectedItem) MarshalJSON() ([]byte, error) {
if mffpi.CreateMode != "" {
objectMap["createMode"] = mffpi.CreateMode
}
+ if mffpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = mffpi.DeferredDeleteTimeInUTC
+ }
+ if mffpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = mffpi.IsScheduledForDeferredDelete
+ }
+ if mffpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = mffpi.DeferredDeleteTimeRemaining
+ }
+ if mffpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = mffpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if mffpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = mffpi.IsRehydrate
+ }
if mffpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = mffpi.ProtectedItemType
}
@@ -13402,6 +13692,16 @@ type ProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -13515,6 +13815,21 @@ func (pi ProtectedItem) MarshalJSON() ([]byte, error) {
if pi.CreateMode != "" {
objectMap["createMode"] = pi.CreateMode
}
+ if pi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = pi.DeferredDeleteTimeInUTC
+ }
+ if pi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = pi.IsScheduledForDeferredDelete
+ }
+ if pi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = pi.DeferredDeleteTimeRemaining
+ }
+ if pi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = pi.IsDeferredDeleteScheduleUpcoming
+ }
+ if pi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = pi.IsRehydrate
+ }
if pi.ProtectedItemType != "" {
objectMap["protectedItemType"] = pi.ProtectedItemType
}
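
> Note (not part of the generated diff): the hunks above add the same five soft-delete properties (`deferredDeleteTimeInUTC`, `isScheduledForDeferredDelete`, `deferredDeleteTimeRemaining`, `isDeferredDeleteScheduleUpcoming`, `isRehydrate`) to every protected-item variant and guard them with nil checks in `MarshalJSON`. As a minimal sketch of consuming the new metadata — assuming the 2016-12-01 `backup` package path — a caller might do something like this:

```go
// Illustrative sketch only — not part of the generated diff. Assumes the
// 2016-12-01 backup package path; adjust the version segment as needed.
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2016-12-01/backup"
)

// describeSoftDelete prints the soft-delete metadata introduced by this change.
// Every new field is a pointer, so each one is nil-checked before dereferencing.
func describeSoftDelete(pi backup.ProtectedItem) {
	if pi.IsScheduledForDeferredDelete != nil && *pi.IsScheduledForDeferredDelete {
		fmt.Println("item is scheduled for deferred (soft) delete")
		if pi.DeferredDeleteTimeInUTC != nil {
			fmt.Printf("  deletion time (UTC): %v\n", pi.DeferredDeleteTimeInUTC.Time)
		}
		if pi.DeferredDeleteTimeRemaining != nil {
			fmt.Printf("  time remaining:      %s\n", *pi.DeferredDeleteTimeRemaining)
		}
		if pi.IsDeferredDeleteScheduleUpcoming != nil && *pi.IsDeferredDeleteScheduleUpcoming {
			fmt.Println("  purge is imminent")
		}
	}
	if pi.IsRehydrate != nil && *pi.IsRehydrate {
		fmt.Println("item will be moved into the Pause state")
	}
}

func main() {
	scheduled := true
	remaining := "13.23:00:00" // hypothetical value for illustration
	describeSoftDelete(backup.ProtectedItem{
		IsScheduledForDeferredDelete: &scheduled,
		DeferredDeleteTimeRemaining:  &remaining,
	})
}
```

Because every new field is a pointer, absent values are simply omitted on the wire and must be nil-checked before use.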
diff --git a/services/recoveryservices/mgmt/2017-07-01/backup/models.go b/services/recoveryservices/mgmt/2017-07-01/backup/models.go
index 85748428e4cd..90427aeb46f5 100644
--- a/services/recoveryservices/mgmt/2017-07-01/backup/models.go
+++ b/services/recoveryservices/mgmt/2017-07-01/backup/models.go
@@ -2154,6 +2154,16 @@ type AzureFileshareProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -2207,6 +2217,21 @@ func (afpi AzureFileshareProtectedItem) MarshalJSON() ([]byte, error) {
if afpi.CreateMode != "" {
objectMap["createMode"] = afpi.CreateMode
}
+ if afpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = afpi.DeferredDeleteTimeInUTC
+ }
+ if afpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = afpi.IsScheduledForDeferredDelete
+ }
+ if afpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = afpi.DeferredDeleteTimeRemaining
+ }
+ if afpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = afpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if afpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = afpi.IsRehydrate
+ }
if afpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = afpi.ProtectedItemType
}
@@ -2957,6 +2982,16 @@ type AzureIaaSClassicComputeVMProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -3019,6 +3054,21 @@ func (aisccvpi AzureIaaSClassicComputeVMProtectedItem) MarshalJSON() ([]byte, er
if aisccvpi.CreateMode != "" {
objectMap["createMode"] = aisccvpi.CreateMode
}
+ if aisccvpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = aisccvpi.DeferredDeleteTimeInUTC
+ }
+ if aisccvpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = aisccvpi.IsScheduledForDeferredDelete
+ }
+ if aisccvpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = aisccvpi.DeferredDeleteTimeRemaining
+ }
+ if aisccvpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = aisccvpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if aisccvpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = aisccvpi.IsRehydrate
+ }
if aisccvpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = aisccvpi.ProtectedItemType
}
@@ -3403,6 +3453,16 @@ type AzureIaaSComputeVMProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -3465,6 +3525,21 @@ func (aiscvpi AzureIaaSComputeVMProtectedItem) MarshalJSON() ([]byte, error) {
if aiscvpi.CreateMode != "" {
objectMap["createMode"] = aiscvpi.CreateMode
}
+ if aiscvpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = aiscvpi.DeferredDeleteTimeInUTC
+ }
+ if aiscvpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = aiscvpi.IsScheduledForDeferredDelete
+ }
+ if aiscvpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = aiscvpi.DeferredDeleteTimeRemaining
+ }
+ if aiscvpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = aiscvpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if aiscvpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = aiscvpi.IsRehydrate
+ }
if aiscvpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = aiscvpi.ProtectedItemType
}
@@ -3792,6 +3867,16 @@ type AzureIaaSVMProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -3895,6 +3980,21 @@ func (aispi AzureIaaSVMProtectedItem) MarshalJSON() ([]byte, error) {
if aispi.CreateMode != "" {
objectMap["createMode"] = aispi.CreateMode
}
+ if aispi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = aispi.DeferredDeleteTimeInUTC
+ }
+ if aispi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = aispi.IsScheduledForDeferredDelete
+ }
+ if aispi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = aispi.DeferredDeleteTimeRemaining
+ }
+ if aispi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = aispi.IsDeferredDeleteScheduleUpcoming
+ }
+ if aispi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = aispi.IsRehydrate
+ }
if aispi.ProtectedItemType != "" {
objectMap["protectedItemType"] = aispi.ProtectedItemType
}
@@ -4642,6 +4742,16 @@ type AzureSQLProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -4683,6 +4793,21 @@ func (aspi AzureSQLProtectedItem) MarshalJSON() ([]byte, error) {
if aspi.CreateMode != "" {
objectMap["createMode"] = aspi.CreateMode
}
+ if aspi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = aspi.DeferredDeleteTimeInUTC
+ }
+ if aspi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = aspi.IsScheduledForDeferredDelete
+ }
+ if aspi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = aspi.DeferredDeleteTimeRemaining
+ }
+ if aspi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = aspi.IsDeferredDeleteScheduleUpcoming
+ }
+ if aspi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = aspi.IsRehydrate
+ }
if aspi.ProtectedItemType != "" {
objectMap["protectedItemType"] = aspi.ProtectedItemType
}
@@ -5954,6 +6079,16 @@ type AzureVMWorkloadProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -6067,6 +6202,21 @@ func (avwpi AzureVMWorkloadProtectedItem) MarshalJSON() ([]byte, error) {
if avwpi.CreateMode != "" {
objectMap["createMode"] = avwpi.CreateMode
}
+ if avwpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = avwpi.DeferredDeleteTimeInUTC
+ }
+ if avwpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = avwpi.IsScheduledForDeferredDelete
+ }
+ if avwpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = avwpi.DeferredDeleteTimeRemaining
+ }
+ if avwpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = avwpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if avwpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = avwpi.IsRehydrate
+ }
if avwpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = avwpi.ProtectedItemType
}
@@ -6441,6 +6591,16 @@ type AzureVMWorkloadSAPAseDatabaseProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -6509,6 +6669,21 @@ func (avwsadpi AzureVMWorkloadSAPAseDatabaseProtectedItem) MarshalJSON() ([]byte
if avwsadpi.CreateMode != "" {
objectMap["createMode"] = avwsadpi.CreateMode
}
+ if avwsadpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = avwsadpi.DeferredDeleteTimeInUTC
+ }
+ if avwsadpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = avwsadpi.IsScheduledForDeferredDelete
+ }
+ if avwsadpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = avwsadpi.DeferredDeleteTimeRemaining
+ }
+ if avwsadpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = avwsadpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if avwsadpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = avwsadpi.IsRehydrate
+ }
if avwsadpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = avwsadpi.ProtectedItemType
}
@@ -7178,6 +7353,16 @@ type AzureVMWorkloadSAPHanaDatabaseProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -7246,6 +7431,21 @@ func (avwshdpi AzureVMWorkloadSAPHanaDatabaseProtectedItem) MarshalJSON() ([]byt
if avwshdpi.CreateMode != "" {
objectMap["createMode"] = avwshdpi.CreateMode
}
+ if avwshdpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = avwshdpi.DeferredDeleteTimeInUTC
+ }
+ if avwshdpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = avwshdpi.IsScheduledForDeferredDelete
+ }
+ if avwshdpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = avwshdpi.DeferredDeleteTimeRemaining
+ }
+ if avwshdpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = avwshdpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if avwshdpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = avwshdpi.IsRehydrate
+ }
if avwshdpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = avwshdpi.ProtectedItemType
}
@@ -8073,6 +8273,16 @@ type AzureVMWorkloadSQLDatabaseProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -8141,6 +8351,21 @@ func (avwsdpi AzureVMWorkloadSQLDatabaseProtectedItem) MarshalJSON() ([]byte, er
if avwsdpi.CreateMode != "" {
objectMap["createMode"] = avwsdpi.CreateMode
}
+ if avwsdpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = avwsdpi.DeferredDeleteTimeInUTC
+ }
+ if avwsdpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = avwsdpi.IsScheduledForDeferredDelete
+ }
+ if avwsdpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = avwsdpi.DeferredDeleteTimeRemaining
+ }
+ if avwsdpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = avwsdpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if avwsdpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = avwsdpi.IsRehydrate
+ }
if avwsdpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = avwsdpi.ProtectedItemType
}
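
> Note (not part of the generated diff): the 2017-07-01 package gains the same nil-guarded serialization — `MarshalJSON` writes each new key only when its pointer is set. A small sketch of that behavior, assuming the 2017-07-01 `backup` package path:

```go
// Illustrative sketch only — not part of the generated diff. Assumes the
// 2017-07-01 backup package path.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup"
)

func main() {
	rehydrate := true

	// Only IsRehydrate is set; the remaining soft-delete pointers stay nil and
	// are therefore skipped by the nil checks added to MarshalJSON above.
	item := backup.AzureFileshareProtectedItem{
		IsRehydrate: &rehydrate,
	}

	b, err := json.Marshal(item)
	if err != nil {
		panic(err)
	}
	// The output contains "isRehydrate":true but none of the other new keys.
	fmt.Println(string(b))
}
```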
@@ -11657,8 +11882,6 @@ type DPMProtectedItem struct {
BackupEngineName *string `json:"backupEngineName,omitempty"`
// ProtectionState - Protection state of the backup engine. Possible values include: 'ProtectedItemStateInvalid', 'ProtectedItemStateIRPending', 'ProtectedItemStateProtected', 'ProtectedItemStateProtectionError', 'ProtectedItemStateProtectionStopped', 'ProtectedItemStateProtectionPaused'
ProtectionState ProtectedItemState `json:"protectionState,omitempty"`
- // IsScheduledForDeferredDelete - To check if backup item is scheduled for deferred delete
- IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
// ExtendedInfo - Extended info of the backup item.
ExtendedInfo *DPMProtectedItemExtendedInfo `json:"extendedInfo,omitempty"`
// BackupManagementType - Type of backup management for the backed up item. Possible values include: 'ManagementTypeInvalid', 'ManagementTypeAzureIaasVM', 'ManagementTypeMAB', 'ManagementTypeDPM', 'ManagementTypeAzureBackupServer', 'ManagementTypeAzureSQL', 'ManagementTypeAzureStorage', 'ManagementTypeAzureWorkload', 'ManagementTypeDefaultBackup'
@@ -11677,6 +11900,16 @@ type DPMProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -11694,9 +11927,6 @@ func (dpi DPMProtectedItem) MarshalJSON() ([]byte, error) {
if dpi.ProtectionState != "" {
objectMap["protectionState"] = dpi.ProtectionState
}
- if dpi.IsScheduledForDeferredDelete != nil {
- objectMap["isScheduledForDeferredDelete"] = dpi.IsScheduledForDeferredDelete
- }
if dpi.ExtendedInfo != nil {
objectMap["extendedInfo"] = dpi.ExtendedInfo
}
@@ -11724,6 +11954,21 @@ func (dpi DPMProtectedItem) MarshalJSON() ([]byte, error) {
if dpi.CreateMode != "" {
objectMap["createMode"] = dpi.CreateMode
}
+ if dpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = dpi.DeferredDeleteTimeInUTC
+ }
+ if dpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = dpi.IsScheduledForDeferredDelete
+ }
+ if dpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = dpi.DeferredDeleteTimeRemaining
+ }
+ if dpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = dpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if dpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = dpi.IsRehydrate
+ }
if dpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = dpi.ProtectedItemType
}
@@ -12653,6 +12898,16 @@ type GenericProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -12703,6 +12958,21 @@ func (gpi GenericProtectedItem) MarshalJSON() ([]byte, error) {
if gpi.CreateMode != "" {
objectMap["createMode"] = gpi.CreateMode
}
+ if gpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = gpi.DeferredDeleteTimeInUTC
+ }
+ if gpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = gpi.IsScheduledForDeferredDelete
+ }
+ if gpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = gpi.DeferredDeleteTimeRemaining
+ }
+ if gpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = gpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if gpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = gpi.IsRehydrate
+ }
if gpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = gpi.ProtectedItemType
}
@@ -14734,8 +15004,6 @@ type MabFileFolderProtectedItem struct {
LastBackupStatus *string `json:"lastBackupStatus,omitempty"`
// ProtectionState - Protected, ProtectionStopped, IRPending or ProtectionError
ProtectionState *string `json:"protectionState,omitempty"`
- // IsScheduledForDeferredDelete - Specifies if the item is scheduled for deferred deletion.
- IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
// DeferredDeleteSyncTimeInUTC - Sync time for deferred deletion.
DeferredDeleteSyncTimeInUTC *int64 `json:"deferredDeleteSyncTimeInUTC,omitempty"`
// ExtendedInfo - Additional information with this backup item.
@@ -14756,6 +15024,16 @@ type MabFileFolderProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -14776,9 +15054,6 @@ func (mffpi MabFileFolderProtectedItem) MarshalJSON() ([]byte, error) {
if mffpi.ProtectionState != nil {
objectMap["protectionState"] = mffpi.ProtectionState
}
- if mffpi.IsScheduledForDeferredDelete != nil {
- objectMap["isScheduledForDeferredDelete"] = mffpi.IsScheduledForDeferredDelete
- }
if mffpi.DeferredDeleteSyncTimeInUTC != nil {
objectMap["deferredDeleteSyncTimeInUTC"] = mffpi.DeferredDeleteSyncTimeInUTC
}
@@ -14809,6 +15084,21 @@ func (mffpi MabFileFolderProtectedItem) MarshalJSON() ([]byte, error) {
if mffpi.CreateMode != "" {
objectMap["createMode"] = mffpi.CreateMode
}
+ if mffpi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = mffpi.DeferredDeleteTimeInUTC
+ }
+ if mffpi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = mffpi.IsScheduledForDeferredDelete
+ }
+ if mffpi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = mffpi.DeferredDeleteTimeRemaining
+ }
+ if mffpi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = mffpi.IsDeferredDeleteScheduleUpcoming
+ }
+ if mffpi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = mffpi.IsRehydrate
+ }
if mffpi.ProtectedItemType != "" {
objectMap["protectedItemType"] = mffpi.ProtectedItemType
}
@@ -16229,6 +16519,16 @@ type ProtectedItem struct {
BackupSetName *string `json:"backupSetName,omitempty"`
// CreateMode - Create mode to indicate recovery of existing soft deleted data source or creation of new data source. Possible values include: 'CreateModeInvalid', 'CreateModeDefault', 'CreateModeRecover'
CreateMode CreateMode `json:"createMode,omitempty"`
+ // DeferredDeleteTimeInUTC - Time for deferred deletion in UTC
+ DeferredDeleteTimeInUTC *date.Time `json:"deferredDeleteTimeInUTC,omitempty"`
+ // IsScheduledForDeferredDelete - Flag to identify whether the DS is scheduled for deferred delete
+ IsScheduledForDeferredDelete *bool `json:"isScheduledForDeferredDelete,omitempty"`
+ // DeferredDeleteTimeRemaining - Time remaining before the DS marked for deferred delete is permanently deleted
+ DeferredDeleteTimeRemaining *string `json:"deferredDeleteTimeRemaining,omitempty"`
+ // IsDeferredDeleteScheduleUpcoming - Flag to identify whether the deferred deleted DS is to be purged soon
+ IsDeferredDeleteScheduleUpcoming *bool `json:"isDeferredDeleteScheduleUpcoming,omitempty"`
+ // IsRehydrate - Flag to identify that deferred deleted DS is to be moved into Pause state
+ IsRehydrate *bool `json:"isRehydrate,omitempty"`
// ProtectedItemType - Possible values include: 'ProtectedItemTypeProtectedItem', 'ProtectedItemTypeAzureFileShareProtectedItem', 'ProtectedItemTypeMicrosoftClassicComputevirtualMachines', 'ProtectedItemTypeMicrosoftComputevirtualMachines', 'ProtectedItemTypeAzureIaaSVMProtectedItem', 'ProtectedItemTypeMicrosoftSqlserversdatabases', 'ProtectedItemTypeAzureVMWorkloadProtectedItem', 'ProtectedItemTypeAzureVMWorkloadSAPAseDatabase', 'ProtectedItemTypeAzureVMWorkloadSAPHanaDatabase', 'ProtectedItemTypeAzureVMWorkloadSQLDatabase', 'ProtectedItemTypeDPMProtectedItem', 'ProtectedItemTypeGenericProtectedItem', 'ProtectedItemTypeMabFileFolderProtectedItem'
ProtectedItemType ProtectedItemType `json:"protectedItemType,omitempty"`
}
@@ -16342,6 +16642,21 @@ func (pi ProtectedItem) MarshalJSON() ([]byte, error) {
if pi.CreateMode != "" {
objectMap["createMode"] = pi.CreateMode
}
+ if pi.DeferredDeleteTimeInUTC != nil {
+ objectMap["deferredDeleteTimeInUTC"] = pi.DeferredDeleteTimeInUTC
+ }
+ if pi.IsScheduledForDeferredDelete != nil {
+ objectMap["isScheduledForDeferredDelete"] = pi.IsScheduledForDeferredDelete
+ }
+ if pi.DeferredDeleteTimeRemaining != nil {
+ objectMap["deferredDeleteTimeRemaining"] = pi.DeferredDeleteTimeRemaining
+ }
+ if pi.IsDeferredDeleteScheduleUpcoming != nil {
+ objectMap["isDeferredDeleteScheduleUpcoming"] = pi.IsDeferredDeleteScheduleUpcoming
+ }
+ if pi.IsRehydrate != nil {
+ objectMap["isRehydrate"] = pi.IsRehydrate
+ }
if pi.ProtectedItemType != "" {
objectMap["protectedItemType"] = pi.ProtectedItemType
}
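
The hunks above add five soft-delete fields (DeferredDeleteTimeInUTC, IsScheduledForDeferredDelete, DeferredDeleteTimeRemaining, IsDeferredDeleteScheduleUpcoming, IsRehydrate) to ProtectedItem and each of its concrete subtypes, and extend every MarshalJSON to emit them when set. Below is a minimal sketch of what this looks like from the caller's side; the 2017-07-01 backup import path and the helper packages are assumptions, and only fields visible in the diff are populated.

```go
// Sketch: populating and serializing the new deferred-delete fields (assumed package path).
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/recoveryservicesbackup/mgmt/2017-07-01/backup"
	"github.com/Azure/go-autorest/autorest/date"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// A generic protected item with the new deferred-delete metadata set.
	item := backup.GenericProtectedItem{
		BackupSetName:                    to.StringPtr("my-backup-set"),
		IsScheduledForDeferredDelete:     to.BoolPtr(true),
		DeferredDeleteTimeInUTC:          &date.Time{Time: time.Now().UTC().Add(14 * 24 * time.Hour)},
		DeferredDeleteTimeRemaining:      to.StringPtr("P14D"),
		IsDeferredDeleteScheduleUpcoming: to.BoolPtr(false),
		IsRehydrate:                      to.BoolPtr(false),
		ProtectedItemType:                backup.ProtectedItemTypeGenericProtectedItem,
	}

	// MarshalJSON (shown in the diff) emits the new keys only when the pointers are non-nil.
	b, err := json.Marshal(item)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```

Because each new field is a pointer guarded by a nil check in MarshalJSON, items built before this change still round-trip without the new keys.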
diff --git a/services/resources/mgmt/2015-11-01/resources/deployments.go b/services/resources/mgmt/2015-11-01/resources/deployments.go
index cfc38a9affed..d9aff3390343 100644
--- a/services/resources/mgmt/2015-11-01/resources/deployments.go
+++ b/services/resources/mgmt/2015-11-01/resources/deployments.go
@@ -41,6 +41,78 @@ func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) Depl
return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
+// CalculateTemplateHash calculate the hash of the given template.
+// Parameters:
+// templateParameter - the template provided to calculate hash.
+func (client DeploymentsClient) CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result TemplateHashResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CalculateTemplateHash")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CalculateTemplateHashPreparer(ctx, templateParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CalculateTemplateHashSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CalculateTemplateHashResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CalculateTemplateHashPreparer prepares the CalculateTemplateHash request.
+func (client DeploymentsClient) CalculateTemplateHashPreparer(ctx context.Context, templateParameter interface{}) (*http.Request, error) {
+ const APIVersion = "2015-11-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Resources/calculateTemplateHash"),
+ autorest.WithJSON(templateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CalculateTemplateHashSender sends the CalculateTemplateHash request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) CalculateTemplateHashSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CalculateTemplateHashResponder handles the response to the CalculateTemplateHash request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) CalculateTemplateHashResponder(resp *http.Response) (result TemplateHashResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// Cancel cancel a currently running template deployment.
// Parameters:
// resourceGroupName - the name of the resource group. The name is case insensitive.
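
Every resources API version in this release gains CalculateTemplateHash on DeploymentsClient, a POST to /providers/Microsoft.Resources/calculateTemplateHash. A hedged usage sketch follows; the auth.NewAuthorizerFromEnvironment helper and the placeholder subscription ID are assumptions, while the method and result fields match the code above.

```go
// Sketch: computing a template hash with the new operation (2015-11-01 client).
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-11-01/resources"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	client := resources.NewDeploymentsClient("<subscription-id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client.Authorizer = authorizer

	// Any JSON-serializable value is accepted; a minimal ARM template skeleton is enough.
	template := map[string]interface{}{
		"$schema":        "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
		"contentVersion": "1.0.0.0",
		"resources":      []interface{}{},
	}

	result, err := client.CalculateTemplateHash(context.Background(), template)
	if err != nil {
		panic(err)
	}
	fmt.Println("hash:", *result.TemplateHash)
	fmt.Println("minified:", *result.MinifiedTemplate)
}
```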
diff --git a/services/resources/mgmt/2015-11-01/resources/models.go b/services/resources/mgmt/2015-11-01/resources/models.go
index 2c8fc58d3926..981de1fe6401 100644
--- a/services/resources/mgmt/2015-11-01/resources/models.go
+++ b/services/resources/mgmt/2015-11-01/resources/models.go
@@ -56,6 +56,11 @@ type BasicDependency struct {
ResourceName *string `json:"resourceName,omitempty"`
}
+// CloudError an error response for a resource management request.
+type CloudError struct {
+ Error *ErrorResponse `json:"error,omitempty"`
+}
+
// Dependency deployment dependency information.
type Dependency struct {
// DependsOn - Gets the list of dependencies.
@@ -510,6 +515,28 @@ type DeploymentValidateResult struct {
Properties *DeploymentPropertiesExtended `json:"properties,omitempty"`
}
+// ErrorAdditionalInfo the resource management error additional info.
+type ErrorAdditionalInfo struct {
+ // Type - READ-ONLY; The additional info type.
+ Type *string `json:"type,omitempty"`
+ // Info - READ-ONLY; The additional info.
+ Info interface{} `json:"info,omitempty"`
+}
+
+// ErrorResponse the resource management error response.
+type ErrorResponse struct {
+ // Code - READ-ONLY; The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; The error message.
+ Message *string `json:"message,omitempty"`
+ // Target - READ-ONLY; The error target.
+ Target *string `json:"target,omitempty"`
+ // Details - READ-ONLY; The error details.
+ Details *[]ErrorResponse `json:"details,omitempty"`
+ // AdditionalInfo - READ-ONLY; The error additional info.
+ AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"`
+}
+
// GenericResource resource information.
type GenericResource struct {
autorest.Response `json:"-"`
@@ -1752,6 +1779,16 @@ type TargetResource struct {
ResourceType *string `json:"resourceType,omitempty"`
}
+// TemplateHashResult result of the request to calculate template hash. It contains a string of minified
+// template and its hash.
+type TemplateHashResult struct {
+ autorest.Response `json:"-"`
+ // MinifiedTemplate - The minified template string.
+ MinifiedTemplate *string `json:"minifiedTemplate,omitempty"`
+ // TemplateHash - The template hash.
+ TemplateHash *string `json:"templateHash,omitempty"`
+}
+
// TemplateLink entity representing the reference to the template.
type TemplateLink struct {
// URI - URI referencing the template.
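
models.go also gains CloudError, ErrorResponse, and ErrorAdditionalInfo, the standard ARM error envelope. A small sketch of how the new types decode a typical error payload; the JSON document below is illustrative, not captured from a real response.

```go
// Sketch: decoding an ARM error body into the new CloudError/ErrorResponse types.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-11-01/resources"
)

func main() {
	payload := []byte(`{
		"error": {
			"code": "InvalidTemplate",
			"message": "Deployment template validation failed.",
			"details": [{"code": "LocationRequired", "message": "The location property is required."}]
		}
	}`)

	var ce resources.CloudError
	if err := json.Unmarshal(payload, &ce); err != nil {
		panic(err)
	}
	if ce.Error != nil {
		fmt.Println(*ce.Error.Code, "-", *ce.Error.Message)
		if ce.Error.Details != nil {
			for _, d := range *ce.Error.Details {
				fmt.Println("  detail:", *d.Code, "-", *d.Message)
			}
		}
	}
}
```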
diff --git a/services/resources/mgmt/2015-11-01/resources/resourcesapi/interfaces.go b/services/resources/mgmt/2015-11-01/resources/resourcesapi/interfaces.go
index ba76d8c1b9cb..b322e9dcb533 100644
--- a/services/resources/mgmt/2015-11-01/resources/resourcesapi/interfaces.go
+++ b/services/resources/mgmt/2015-11-01/resources/resourcesapi/interfaces.go
@@ -25,6 +25,7 @@ import (
// DeploymentsClientAPI contains the set of methods on the DeploymentsClient type.
type DeploymentsClientAPI interface {
+ CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result resources.TemplateHashResult, err error)
Cancel(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CheckExistence(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment) (result resources.DeploymentsCreateOrUpdateFuture, err error)
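
Adding CalculateTemplateHash to DeploymentsClientAPI means any fake that implements the interface must grow the method, and code written against the interface can call it without depending on the concrete client. A sketch of that pattern under those assumptions; credentials setup is omitted, so the call itself will only succeed with a configured Authorizer.

```go
// Sketch: programming against the updated DeploymentsClientAPI interface.
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-11-01/resources"
	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2015-11-01/resources/resourcesapi"
)

// hashTemplate depends only on the interface, so it can be exercised with a
// hand-written fake in tests and with resources.DeploymentsClient in production.
func hashTemplate(ctx context.Context, api resourcesapi.DeploymentsClientAPI, template interface{}) (string, error) {
	res, err := api.CalculateTemplateHash(ctx, template)
	if err != nil {
		return "", err
	}
	if res.TemplateHash == nil {
		return "", fmt.Errorf("service returned no hash")
	}
	return *res.TemplateHash, nil
}

// Compile-time check that the concrete client still satisfies the interface.
var _ resourcesapi.DeploymentsClientAPI = resources.DeploymentsClient{}

func main() {
	client := resources.NewDeploymentsClient("<subscription-id>")
	// Authorizer setup omitted; see the deployments sketch above.
	h, err := hashTemplate(context.Background(), client, map[string]interface{}{"resources": []interface{}{}})
	fmt.Println(h, err)
}
```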
diff --git a/services/resources/mgmt/2016-02-01/resources/deployments.go b/services/resources/mgmt/2016-02-01/resources/deployments.go
index 13f3f58a3bf6..184c1fecc82c 100644
--- a/services/resources/mgmt/2016-02-01/resources/deployments.go
+++ b/services/resources/mgmt/2016-02-01/resources/deployments.go
@@ -41,6 +41,78 @@ func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) Depl
return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
+// CalculateTemplateHash calculate the hash of the given template.
+// Parameters:
+// templateParameter - the template provided to calculate hash.
+func (client DeploymentsClient) CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result TemplateHashResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CalculateTemplateHash")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CalculateTemplateHashPreparer(ctx, templateParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CalculateTemplateHashSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CalculateTemplateHashResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CalculateTemplateHashPreparer prepares the CalculateTemplateHash request.
+func (client DeploymentsClient) CalculateTemplateHashPreparer(ctx context.Context, templateParameter interface{}) (*http.Request, error) {
+ const APIVersion = "2016-02-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Resources/calculateTemplateHash"),
+ autorest.WithJSON(templateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CalculateTemplateHashSender sends the CalculateTemplateHash request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) CalculateTemplateHashSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CalculateTemplateHashResponder handles the response to the CalculateTemplateHash request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) CalculateTemplateHashResponder(resp *http.Response) (result TemplateHashResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// Cancel cancel a currently running template deployment.
// Parameters:
// resourceGroupName - the name of the resource group. The name is case insensitive.
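
CalculateTemplateHashSender builds its pipeline from autorest.GetSendDecorators with DoRetryForStatusCodes, so retry behavior follows the RetryAttempts and RetryDuration fields inherited from autorest.Client. A brief sketch of tuning those knobs before calling; the values are illustrative and an Authorizer is still required for the request to succeed.

```go
// Sketch: adjusting retry settings used by the generated sender (2016-02-01 client).
package main

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources"
)

func main() {
	client := resources.NewDeploymentsClient("<subscription-id>")

	// Fields inherited from the embedded autorest.Client; illustrative values.
	client.RetryAttempts = 5
	client.RetryDuration = 10 * time.Second

	// In real use, set client.Authorizer first; the call is shown only to tie the
	// retry settings to the operation that consumes them.
	_, _ = client.CalculateTemplateHash(context.Background(), map[string]interface{}{})
}
```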
diff --git a/services/resources/mgmt/2016-02-01/resources/models.go b/services/resources/mgmt/2016-02-01/resources/models.go
index 8a30b7225307..bfd5e4acb57e 100644
--- a/services/resources/mgmt/2016-02-01/resources/models.go
+++ b/services/resources/mgmt/2016-02-01/resources/models.go
@@ -85,6 +85,11 @@ type BasicDependency struct {
ResourceName *string `json:"resourceName,omitempty"`
}
+// CloudError an error response for a resource management request.
+type CloudError struct {
+ Error *ErrorResponse `json:"error,omitempty"`
+}
+
// DebugSetting ...
type DebugSetting struct {
// DetailLevel - The debug detail level.
@@ -562,6 +567,28 @@ type DeploymentValidateResult struct {
Properties *DeploymentPropertiesExtended `json:"properties,omitempty"`
}
+// ErrorAdditionalInfo the resource management error additional info.
+type ErrorAdditionalInfo struct {
+ // Type - READ-ONLY; The additional info type.
+ Type *string `json:"type,omitempty"`
+ // Info - READ-ONLY; The additional info.
+ Info interface{} `json:"info,omitempty"`
+}
+
+// ErrorResponse the resource management error response.
+type ErrorResponse struct {
+ // Code - READ-ONLY; The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; The error message.
+ Message *string `json:"message,omitempty"`
+ // Target - READ-ONLY; The error target.
+ Target *string `json:"target,omitempty"`
+ // Details - READ-ONLY; The error details.
+ Details *[]ErrorResponse `json:"details,omitempty"`
+ // AdditionalInfo - READ-ONLY; The error additional info.
+ AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"`
+}
+
// ExportTemplateRequest export resource group template request parameters.
type ExportTemplateRequest struct {
// ResourcesProperty - The IDs of the resources to filter the export by. To export all resources, supply an array with single entry '*'.
@@ -1529,6 +1556,16 @@ type TargetResource struct {
ResourceType *string `json:"resourceType,omitempty"`
}
+// TemplateHashResult result of the request to calculate template hash. It contains a string of minified
+// template and its hash.
+type TemplateHashResult struct {
+ autorest.Response `json:"-"`
+ // MinifiedTemplate - The minified template string.
+ MinifiedTemplate *string `json:"minifiedTemplate,omitempty"`
+ // TemplateHash - The template hash.
+ TemplateHash *string `json:"templateHash,omitempty"`
+}
+
// TemplateLink entity representing the reference to the template.
type TemplateLink struct {
// URI - URI referencing the template.
diff --git a/services/resources/mgmt/2016-02-01/resources/resourcesapi/interfaces.go b/services/resources/mgmt/2016-02-01/resources/resourcesapi/interfaces.go
index 8f885224ae26..c22dda2f7a85 100644
--- a/services/resources/mgmt/2016-02-01/resources/resourcesapi/interfaces.go
+++ b/services/resources/mgmt/2016-02-01/resources/resourcesapi/interfaces.go
@@ -25,6 +25,7 @@ import (
// DeploymentsClientAPI contains the set of methods on the DeploymentsClient type.
type DeploymentsClientAPI interface {
+ CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result resources.TemplateHashResult, err error)
Cancel(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CheckExistence(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment) (result resources.DeploymentsCreateOrUpdateFuture, err error)
diff --git a/services/resources/mgmt/2016-07-01/resources/deployments.go b/services/resources/mgmt/2016-07-01/resources/deployments.go
index eedec5aa9d18..19e2307dff36 100644
--- a/services/resources/mgmt/2016-07-01/resources/deployments.go
+++ b/services/resources/mgmt/2016-07-01/resources/deployments.go
@@ -41,6 +41,78 @@ func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) Depl
return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
+// CalculateTemplateHash calculate the hash of the given template.
+// Parameters:
+// templateParameter - the template provided to calculate hash.
+func (client DeploymentsClient) CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result TemplateHashResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CalculateTemplateHash")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CalculateTemplateHashPreparer(ctx, templateParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CalculateTemplateHashSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CalculateTemplateHashResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CalculateTemplateHashPreparer prepares the CalculateTemplateHash request.
+func (client DeploymentsClient) CalculateTemplateHashPreparer(ctx context.Context, templateParameter interface{}) (*http.Request, error) {
+ const APIVersion = "2016-07-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Resources/calculateTemplateHash"),
+ autorest.WithJSON(templateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CalculateTemplateHashSender sends the CalculateTemplateHash request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) CalculateTemplateHashSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CalculateTemplateHashResponder handles the response to the CalculateTemplateHash request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) CalculateTemplateHashResponder(resp *http.Response) (result TemplateHashResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// Cancel cancel a currently running template deployment.
// Parameters:
// resourceGroupName - the name of the resource group. The name is case insensitive.
diff --git a/services/resources/mgmt/2016-07-01/resources/models.go b/services/resources/mgmt/2016-07-01/resources/models.go
index d7ea2a97abc7..1f38702c53c5 100644
--- a/services/resources/mgmt/2016-07-01/resources/models.go
+++ b/services/resources/mgmt/2016-07-01/resources/models.go
@@ -85,6 +85,11 @@ type BasicDependency struct {
ResourceName *string `json:"resourceName,omitempty"`
}
+// CloudError an error response for a resource management request.
+type CloudError struct {
+ Error *ErrorResponse `json:"error,omitempty"`
+}
+
// DebugSetting ...
type DebugSetting struct {
// DetailLevel - The debug detail level.
@@ -562,6 +567,28 @@ type DeploymentValidateResult struct {
Properties *DeploymentPropertiesExtended `json:"properties,omitempty"`
}
+// ErrorAdditionalInfo the resource management error additional info.
+type ErrorAdditionalInfo struct {
+ // Type - READ-ONLY; The additional info type.
+ Type *string `json:"type,omitempty"`
+ // Info - READ-ONLY; The additional info.
+ Info interface{} `json:"info,omitempty"`
+}
+
+// ErrorResponse the resource management error response.
+type ErrorResponse struct {
+ // Code - READ-ONLY; The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; The error message.
+ Message *string `json:"message,omitempty"`
+ // Target - READ-ONLY; The error target.
+ Target *string `json:"target,omitempty"`
+ // Details - READ-ONLY; The error details.
+ Details *[]ErrorResponse `json:"details,omitempty"`
+ // AdditionalInfo - READ-ONLY; The error additional info.
+ AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"`
+}
+
// ExportTemplateRequest export resource group template request parameters.
type ExportTemplateRequest struct {
// ResourcesProperty - The IDs of the resources to filter the export by. To export all resources, supply an array with single entry '*'.
@@ -1529,6 +1556,16 @@ type TargetResource struct {
ResourceType *string `json:"resourceType,omitempty"`
}
+// TemplateHashResult result of the request to calculate template hash. It contains a string of minified
+// template and its hash.
+type TemplateHashResult struct {
+ autorest.Response `json:"-"`
+ // MinifiedTemplate - The minified template string.
+ MinifiedTemplate *string `json:"minifiedTemplate,omitempty"`
+ // TemplateHash - The template hash.
+ TemplateHash *string `json:"templateHash,omitempty"`
+}
+
// TemplateLink entity representing the reference to the template.
type TemplateLink struct {
// URI - URI referencing the template.
diff --git a/services/resources/mgmt/2016-07-01/resources/resourcesapi/interfaces.go b/services/resources/mgmt/2016-07-01/resources/resourcesapi/interfaces.go
index 142046fdec6b..6bceb2cfa733 100644
--- a/services/resources/mgmt/2016-07-01/resources/resourcesapi/interfaces.go
+++ b/services/resources/mgmt/2016-07-01/resources/resourcesapi/interfaces.go
@@ -25,6 +25,7 @@ import (
// DeploymentsClientAPI contains the set of methods on the DeploymentsClient type.
type DeploymentsClientAPI interface {
+ CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result resources.TemplateHashResult, err error)
Cancel(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CheckExistence(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment) (result resources.DeploymentsCreateOrUpdateFuture, err error)
diff --git a/services/resources/mgmt/2016-09-01/resources/deployments.go b/services/resources/mgmt/2016-09-01/resources/deployments.go
index f41ef6b3cd3b..420f4e1c80a2 100644
--- a/services/resources/mgmt/2016-09-01/resources/deployments.go
+++ b/services/resources/mgmt/2016-09-01/resources/deployments.go
@@ -41,6 +41,78 @@ func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) Depl
return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
+// CalculateTemplateHash calculate the hash of the given template.
+// Parameters:
+// templateParameter - the template provided to calculate hash.
+func (client DeploymentsClient) CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result TemplateHashResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CalculateTemplateHash")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CalculateTemplateHashPreparer(ctx, templateParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CalculateTemplateHashSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CalculateTemplateHashResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CalculateTemplateHashPreparer prepares the CalculateTemplateHash request.
+func (client DeploymentsClient) CalculateTemplateHashPreparer(ctx context.Context, templateParameter interface{}) (*http.Request, error) {
+ const APIVersion = "2016-09-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Resources/calculateTemplateHash"),
+ autorest.WithJSON(templateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CalculateTemplateHashSender sends the CalculateTemplateHash request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) CalculateTemplateHashSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CalculateTemplateHashResponder handles the response to the CalculateTemplateHash request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) CalculateTemplateHashResponder(resp *http.Response) (result TemplateHashResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// Cancel you can cancel a deployment only if the provisioningState is Accepted or Running. After the deployment is
// canceled, the provisioningState is set to Canceled. Canceling a template deployment stops the currently running
// template deployment and leaves the resource group partially deployed.
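
The Cancel documentation above spells out the contract: a deployment can be cancelled only while provisioningState is Accepted or Running, after which it is set to Canceled and the resource group may be left partially deployed. A hedged sketch of a cancel call against the 2016-09-01 client; the auth helper, resource group, and deployment names are placeholders.

```go
// Sketch: cancelling an in-flight deployment per the contract described above.
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-09-01/resources"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	client := resources.NewDeploymentsClient("<subscription-id>")
	if authorizer, err := auth.NewAuthorizerFromEnvironment(); err == nil {
		client.Authorizer = authorizer
	}

	// Succeeds only while provisioningState is Accepted or Running; afterwards the
	// deployment is marked Canceled and partially created resources remain.
	resp, err := client.Cancel(context.Background(), "my-resource-group", "my-deployment")
	if err != nil {
		fmt.Println("cancel failed:", err)
		return
	}
	fmt.Println("cancel returned HTTP", resp.StatusCode)
}
```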
diff --git a/services/resources/mgmt/2016-09-01/resources/models.go b/services/resources/mgmt/2016-09-01/resources/models.go
index f6ccee9835ec..9cb5676da067 100644
--- a/services/resources/mgmt/2016-09-01/resources/models.go
+++ b/services/resources/mgmt/2016-09-01/resources/models.go
@@ -85,6 +85,11 @@ type BasicDependency struct {
ResourceName *string `json:"resourceName,omitempty"`
}
+// CloudError an error response for a resource management request.
+type CloudError struct {
+ Error *ErrorResponse `json:"error,omitempty"`
+}
+
// CreateOrUpdateByIDFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type CreateOrUpdateByIDFuture struct {
@@ -664,6 +669,28 @@ type DeploymentValidateResult struct {
Properties *DeploymentPropertiesExtended `json:"properties,omitempty"`
}
+// ErrorAdditionalInfo the resource management error additional info.
+type ErrorAdditionalInfo struct {
+ // Type - READ-ONLY; The additional info type.
+ Type *string `json:"type,omitempty"`
+ // Info - READ-ONLY; The additional info.
+ Info interface{} `json:"info,omitempty"`
+}
+
+// ErrorResponse the resource management error response.
+type ErrorResponse struct {
+ // Code - READ-ONLY; The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; The error message.
+ Message *string `json:"message,omitempty"`
+ // Target - READ-ONLY; The error target.
+ Target *string `json:"target,omitempty"`
+ // Details - READ-ONLY; The error details.
+ Details *[]ErrorResponse `json:"details,omitempty"`
+ // AdditionalInfo - READ-ONLY; The error additional info.
+ AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"`
+}
+
// ExportTemplateRequest export resource group template request parameters.
type ExportTemplateRequest struct {
// ResourcesProperty - The IDs of the resources to filter the export by. To export all resources, supply an array with single entry '*'.
@@ -1636,6 +1663,16 @@ type TargetResource struct {
ResourceType *string `json:"resourceType,omitempty"`
}
+// TemplateHashResult result of the request to calculate template hash. It contains a string of minified
+// template and its hash.
+type TemplateHashResult struct {
+ autorest.Response `json:"-"`
+ // MinifiedTemplate - The minified template string.
+ MinifiedTemplate *string `json:"minifiedTemplate,omitempty"`
+ // TemplateHash - The template hash.
+ TemplateHash *string `json:"templateHash,omitempty"`
+}
+
// TemplateLink entity representing the reference to the template.
type TemplateLink struct {
// URI - The URI of the template to deploy.
diff --git a/services/resources/mgmt/2016-09-01/resources/resourcesapi/interfaces.go b/services/resources/mgmt/2016-09-01/resources/resourcesapi/interfaces.go
index dbc3fec4f916..51a9fb4ea5ff 100644
--- a/services/resources/mgmt/2016-09-01/resources/resourcesapi/interfaces.go
+++ b/services/resources/mgmt/2016-09-01/resources/resourcesapi/interfaces.go
@@ -25,6 +25,7 @@ import (
// DeploymentsClientAPI contains the set of methods on the DeploymentsClient type.
type DeploymentsClientAPI interface {
+ CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result resources.TemplateHashResult, err error)
Cancel(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CheckExistence(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment) (result resources.DeploymentsCreateOrUpdateFuture, err error)
diff --git a/services/resources/mgmt/2017-05-10/resources/deployments.go b/services/resources/mgmt/2017-05-10/resources/deployments.go
index fd39477999e1..d37d3a30a93e 100644
--- a/services/resources/mgmt/2017-05-10/resources/deployments.go
+++ b/services/resources/mgmt/2017-05-10/resources/deployments.go
@@ -41,6 +41,78 @@ func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) Depl
return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
+// CalculateTemplateHash calculate the hash of the given template.
+// Parameters:
+// templateParameter - the template provided to calculate hash.
+func (client DeploymentsClient) CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result TemplateHashResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CalculateTemplateHash")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CalculateTemplateHashPreparer(ctx, templateParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CalculateTemplateHashSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CalculateTemplateHashResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CalculateTemplateHashPreparer prepares the CalculateTemplateHash request.
+func (client DeploymentsClient) CalculateTemplateHashPreparer(ctx context.Context, templateParameter interface{}) (*http.Request, error) {
+ const APIVersion = "2017-05-10"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Resources/calculateTemplateHash"),
+ autorest.WithJSON(templateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CalculateTemplateHashSender sends the CalculateTemplateHash request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) CalculateTemplateHashSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CalculateTemplateHashResponder handles the response to the CalculateTemplateHash request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) CalculateTemplateHashResponder(resp *http.Response) (result TemplateHashResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// Cancel you can cancel a deployment only if the provisioningState is Accepted or Running. After the deployment is
// canceled, the provisioningState is set to Canceled. Canceling a template deployment stops the currently running
// template deployment and leaves the resource group partially deployed.
diff --git a/services/resources/mgmt/2017-05-10/resources/models.go b/services/resources/mgmt/2017-05-10/resources/models.go
index 3ad29548688d..ca3a353a91fd 100644
--- a/services/resources/mgmt/2017-05-10/resources/models.go
+++ b/services/resources/mgmt/2017-05-10/resources/models.go
@@ -85,6 +85,11 @@ type BasicDependency struct {
ResourceName *string `json:"resourceName,omitempty"`
}
+// CloudError an error response for a resource management request.
+type CloudError struct {
+ Error *ErrorResponse `json:"error,omitempty"`
+}
+
// CreateOrUpdateByIDFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type CreateOrUpdateByIDFuture struct {
@@ -664,6 +669,28 @@ type DeploymentValidateResult struct {
Properties *DeploymentPropertiesExtended `json:"properties,omitempty"`
}
+// ErrorAdditionalInfo the resource management error additional info.
+type ErrorAdditionalInfo struct {
+ // Type - READ-ONLY; The additional info type.
+ Type *string `json:"type,omitempty"`
+ // Info - READ-ONLY; The additional info.
+ Info interface{} `json:"info,omitempty"`
+}
+
+// ErrorResponse the resource management error response.
+type ErrorResponse struct {
+ // Code - READ-ONLY; The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; The error message.
+ Message *string `json:"message,omitempty"`
+ // Target - READ-ONLY; The error target.
+ Target *string `json:"target,omitempty"`
+ // Details - READ-ONLY; The error details.
+ Details *[]ErrorResponse `json:"details,omitempty"`
+ // AdditionalInfo - READ-ONLY; The error additional info.
+ AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"`
+}
+
// ExportTemplateRequest export resource group template request parameters.
type ExportTemplateRequest struct {
// ResourcesProperty - The IDs of the resources to filter the export by. To export all resources, supply an array with single entry '*'.
@@ -1667,6 +1694,16 @@ type TargetResource struct {
ResourceType *string `json:"resourceType,omitempty"`
}
+// TemplateHashResult result of the request to calculate template hash. It contains a string of minified
+// template and its hash.
+type TemplateHashResult struct {
+ autorest.Response `json:"-"`
+ // MinifiedTemplate - The minified template string.
+ MinifiedTemplate *string `json:"minifiedTemplate,omitempty"`
+ // TemplateHash - The template hash.
+ TemplateHash *string `json:"templateHash,omitempty"`
+}
+
// TemplateLink entity representing the reference to the template.
type TemplateLink struct {
// URI - The URI of the template to deploy.
diff --git a/services/resources/mgmt/2017-05-10/resources/resourcesapi/interfaces.go b/services/resources/mgmt/2017-05-10/resources/resourcesapi/interfaces.go
index 4b937751dbf3..b96760bd4ab7 100644
--- a/services/resources/mgmt/2017-05-10/resources/resourcesapi/interfaces.go
+++ b/services/resources/mgmt/2017-05-10/resources/resourcesapi/interfaces.go
@@ -25,6 +25,7 @@ import (
// DeploymentsClientAPI contains the set of methods on the DeploymentsClient type.
type DeploymentsClientAPI interface {
+ CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result resources.TemplateHashResult, err error)
Cancel(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CheckExistence(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment) (result resources.DeploymentsCreateOrUpdateFuture, err error)
diff --git a/services/resources/mgmt/2018-02-01/resources/deployments.go b/services/resources/mgmt/2018-02-01/resources/deployments.go
index 30d60648806c..bd475c488953 100644
--- a/services/resources/mgmt/2018-02-01/resources/deployments.go
+++ b/services/resources/mgmt/2018-02-01/resources/deployments.go
@@ -41,6 +41,78 @@ func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) Depl
return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
+// CalculateTemplateHash calculate the hash of the given template.
+// Parameters:
+// templateParameter - the template provided to calculate hash.
+func (client DeploymentsClient) CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result TemplateHashResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CalculateTemplateHash")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CalculateTemplateHashPreparer(ctx, templateParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CalculateTemplateHashSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CalculateTemplateHashResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CalculateTemplateHashPreparer prepares the CalculateTemplateHash request.
+func (client DeploymentsClient) CalculateTemplateHashPreparer(ctx context.Context, templateParameter interface{}) (*http.Request, error) {
+ const APIVersion = "2018-02-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Resources/calculateTemplateHash"),
+ autorest.WithJSON(templateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CalculateTemplateHashSender sends the CalculateTemplateHash request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) CalculateTemplateHashSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CalculateTemplateHashResponder handles the response to the CalculateTemplateHash request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) CalculateTemplateHashResponder(resp *http.Response) (result TemplateHashResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// Cancel you can cancel a deployment only if the provisioningState is Accepted or Running. After the deployment is
// canceled, the provisioningState is set to Canceled. Canceling a template deployment stops the currently running
// template deployment and leaves the resource group partially deployed.
diff --git a/services/resources/mgmt/2018-02-01/resources/models.go b/services/resources/mgmt/2018-02-01/resources/models.go
index c5add3c7e999..45e502f42333 100644
--- a/services/resources/mgmt/2018-02-01/resources/models.go
+++ b/services/resources/mgmt/2018-02-01/resources/models.go
@@ -106,6 +106,11 @@ type BasicDependency struct {
ResourceName *string `json:"resourceName,omitempty"`
}
+// CloudError an error response for a resource management request.
+type CloudError struct {
+ Error *ErrorResponse `json:"error,omitempty"`
+}
+
// CreateOrUpdateByIDFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type CreateOrUpdateByIDFuture struct {
@@ -689,6 +694,28 @@ type DeploymentValidateResult struct {
Properties *DeploymentPropertiesExtended `json:"properties,omitempty"`
}
+// ErrorAdditionalInfo the resource management error additional info.
+type ErrorAdditionalInfo struct {
+ // Type - READ-ONLY; The additional info type.
+ Type *string `json:"type,omitempty"`
+ // Info - READ-ONLY; The additional info.
+ Info interface{} `json:"info,omitempty"`
+}
+
+// ErrorResponse the resource management error response.
+type ErrorResponse struct {
+ // Code - READ-ONLY; The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; The error message.
+ Message *string `json:"message,omitempty"`
+ // Target - READ-ONLY; The error target.
+ Target *string `json:"target,omitempty"`
+ // Details - READ-ONLY; The error details.
+ Details *[]ErrorResponse `json:"details,omitempty"`
+ // AdditionalInfo - READ-ONLY; The error additional info.
+ AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"`
+}
+
// ExportTemplateRequest export resource group template request parameters.
type ExportTemplateRequest struct {
// ResourcesProperty - The IDs of the resources to filter the export by. To export all resources, supply an array with single entry '*'.
@@ -1710,6 +1737,16 @@ type TargetResource struct {
ResourceType *string `json:"resourceType,omitempty"`
}
+// TemplateHashResult result of the request to calculate template hash. It contains a string of minified
+// template and its hash.
+type TemplateHashResult struct {
+ autorest.Response `json:"-"`
+ // MinifiedTemplate - The minified template string.
+ MinifiedTemplate *string `json:"minifiedTemplate,omitempty"`
+ // TemplateHash - The template hash.
+ TemplateHash *string `json:"templateHash,omitempty"`
+}
+
// TemplateLink entity representing the reference to the template.
type TemplateLink struct {
// URI - The URI of the template to deploy.
diff --git a/services/resources/mgmt/2018-02-01/resources/resourcesapi/interfaces.go b/services/resources/mgmt/2018-02-01/resources/resourcesapi/interfaces.go
index aea77d85b4c6..86a9d02aac53 100644
--- a/services/resources/mgmt/2018-02-01/resources/resourcesapi/interfaces.go
+++ b/services/resources/mgmt/2018-02-01/resources/resourcesapi/interfaces.go
@@ -25,6 +25,7 @@ import (
// DeploymentsClientAPI contains the set of methods on the DeploymentsClient type.
type DeploymentsClientAPI interface {
+ CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result resources.TemplateHashResult, err error)
Cancel(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CheckExistence(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment) (result resources.DeploymentsCreateOrUpdateFuture, err error)
diff --git a/services/resources/mgmt/2018-05-01/resources/deployments.go b/services/resources/mgmt/2018-05-01/resources/deployments.go
index 3b29109294eb..24aca4044097 100644
--- a/services/resources/mgmt/2018-05-01/resources/deployments.go
+++ b/services/resources/mgmt/2018-05-01/resources/deployments.go
@@ -41,6 +41,78 @@ func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) Depl
return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
+// CalculateTemplateHash calculate the hash of the given template.
+// Parameters:
+// templateParameter - the template provided to calculate hash.
+func (client DeploymentsClient) CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result TemplateHashResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CalculateTemplateHash")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CalculateTemplateHashPreparer(ctx, templateParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CalculateTemplateHashSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CalculateTemplateHashResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CalculateTemplateHashPreparer prepares the CalculateTemplateHash request.
+func (client DeploymentsClient) CalculateTemplateHashPreparer(ctx context.Context, templateParameter interface{}) (*http.Request, error) {
+ const APIVersion = "2018-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Resources/calculateTemplateHash"),
+ autorest.WithJSON(templateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CalculateTemplateHashSender sends the CalculateTemplateHash request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) CalculateTemplateHashSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CalculateTemplateHashResponder handles the response to the CalculateTemplateHash request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) CalculateTemplateHashResponder(resp *http.Response) (result TemplateHashResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// Cancel you can cancel a deployment only if the provisioningState is Accepted or Running. After the deployment is
// canceled, the provisioningState is set to Canceled. Canceling a template deployment stops the currently running
// template deployment and leaves the resource group partially deployed.
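As a rough usage sketch of the new operation (illustrative only, not part of this change): the client is built from a subscription ID, the authorizer is assumed to be configured by the caller, and the template is passed as a JSON-decoded value since the parameter type is `interface{}`. Placeholder values are marked.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources"
)

func main() {
	// "<subscription id>" is a placeholder; authorizer setup is assumed to be
	// handled by the caller and is therefore left commented out.
	client := resources.NewDeploymentsClient("<subscription id>")
	// client.Authorizer = authorizer

	// Any JSON-decoded template value works here.
	template := map[string]interface{}{
		"$schema":        "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
		"contentVersion": "1.0.0.0",
		"resources":      []interface{}{},
	}

	result, err := client.CalculateTemplateHash(context.Background(), template)
	if err != nil {
		log.Fatal(err)
	}
	if result.TemplateHash != nil {
		fmt.Println("template hash:", *result.TemplateHash)
	}
}
```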
diff --git a/services/resources/mgmt/2018-05-01/resources/models.go b/services/resources/mgmt/2018-05-01/resources/models.go
index 6e1d4d758c40..bbe2e398e39d 100644
--- a/services/resources/mgmt/2018-05-01/resources/models.go
+++ b/services/resources/mgmt/2018-05-01/resources/models.go
@@ -106,6 +106,11 @@ type BasicDependency struct {
ResourceName *string `json:"resourceName,omitempty"`
}
+// CloudError an error response for a resource management request.
+type CloudError struct {
+ Error *ErrorResponse `json:"error,omitempty"`
+}
+
// CreateOrUpdateByIDFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type CreateOrUpdateByIDFuture struct {
@@ -747,6 +752,28 @@ type DeploymentValidateResult struct {
Properties *DeploymentPropertiesExtended `json:"properties,omitempty"`
}
+// ErrorAdditionalInfo the resource management error additional info.
+type ErrorAdditionalInfo struct {
+ // Type - READ-ONLY; The additional info type.
+ Type *string `json:"type,omitempty"`
+ // Info - READ-ONLY; The additional info.
+ Info interface{} `json:"info,omitempty"`
+}
+
+// ErrorResponse the resource management error response.
+type ErrorResponse struct {
+ // Code - READ-ONLY; The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; The error message.
+ Message *string `json:"message,omitempty"`
+ // Target - READ-ONLY; The error target.
+ Target *string `json:"target,omitempty"`
+ // Details - READ-ONLY; The error details.
+ Details *[]ErrorResponse `json:"details,omitempty"`
+ // AdditionalInfo - READ-ONLY; The error additional info.
+ AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"`
+}
+
// ExportTemplateRequest export resource group template request parameters.
type ExportTemplateRequest struct {
// ResourcesProperty - The IDs of the resources to filter the export by. To export all resources, supply an array with single entry '*'.
@@ -1956,6 +1983,16 @@ type TargetResource struct {
ResourceType *string `json:"resourceType,omitempty"`
}
+// TemplateHashResult result of the request to calculate template hash. It contains a string of minified
+// template and its hash.
+type TemplateHashResult struct {
+ autorest.Response `json:"-"`
+ // MinifiedTemplate - The minified template string.
+ MinifiedTemplate *string `json:"minifiedTemplate,omitempty"`
+ // TemplateHash - The template hash.
+ TemplateHash *string `json:"templateHash,omitempty"`
+}
+
// TemplateLink entity representing the reference to the template.
type TemplateLink struct {
// URI - The URI of the template to deploy.
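The new error models are plain data holders whose fields are all pointers, so callers should nil-check before dereferencing. A small illustrative helper (an assumption, not part of the generated package) that flattens an ErrorResponse and its nested Details could look like this:

```go
package errutil

import "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources"

// flattenError walks an ErrorResponse and its nested Details, collecting
// "code: message" strings. Every field is a pointer, hence the nil checks.
func flattenError(e *resources.ErrorResponse) []string {
	if e == nil {
		return nil
	}
	msg := ""
	if e.Code != nil {
		msg = *e.Code
	}
	if e.Message != nil {
		msg += ": " + *e.Message
	}
	out := []string{msg}
	if e.Details != nil {
		for i := range *e.Details {
			out = append(out, flattenError(&(*e.Details)[i])...)
		}
	}
	return out
}
```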
diff --git a/services/resources/mgmt/2018-05-01/resources/resourcesapi/interfaces.go b/services/resources/mgmt/2018-05-01/resources/resourcesapi/interfaces.go
index 1b5dd50dd571..d8b1ac7b04fc 100644
--- a/services/resources/mgmt/2018-05-01/resources/resourcesapi/interfaces.go
+++ b/services/resources/mgmt/2018-05-01/resources/resourcesapi/interfaces.go
@@ -32,6 +32,7 @@ var _ OperationsClientAPI = (*resources.OperationsClient)(nil)
// DeploymentsClientAPI contains the set of methods on the DeploymentsClient type.
type DeploymentsClientAPI interface {
+ CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result resources.TemplateHashResult, err error)
Cancel(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CancelAtSubscriptionScope(ctx context.Context, deploymentName string) (result autorest.Response, err error)
CheckExistence(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
diff --git a/services/resources/mgmt/2019-03-01/resources/deployments.go b/services/resources/mgmt/2019-03-01/resources/deployments.go
index 47edc33ed436..b65dff16bbd3 100644
--- a/services/resources/mgmt/2019-03-01/resources/deployments.go
+++ b/services/resources/mgmt/2019-03-01/resources/deployments.go
@@ -41,6 +41,78 @@ func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) Depl
return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
+// CalculateTemplateHash calculates the hash of the given template.
+// Parameters:
+// templateParameter - the template provided to calculate hash.
+func (client DeploymentsClient) CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result TemplateHashResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CalculateTemplateHash")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CalculateTemplateHashPreparer(ctx, templateParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CalculateTemplateHashSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CalculateTemplateHashResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CalculateTemplateHashPreparer prepares the CalculateTemplateHash request.
+func (client DeploymentsClient) CalculateTemplateHashPreparer(ctx context.Context, templateParameter interface{}) (*http.Request, error) {
+ const APIVersion = "2019-03-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Resources/calculateTemplateHash"),
+ autorest.WithJSON(templateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CalculateTemplateHashSender sends the CalculateTemplateHash request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) CalculateTemplateHashSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CalculateTemplateHashResponder handles the response to the CalculateTemplateHash request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) CalculateTemplateHashResponder(resp *http.Response) (result TemplateHashResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// Cancel you can cancel a deployment only if the provisioningState is Accepted or Running. After the deployment is
// canceled, the provisioningState is set to Canceled. Canceling a template deployment stops the currently running
// template deployment and leaves the resource group partially deployed.
diff --git a/services/resources/mgmt/2019-03-01/resources/models.go b/services/resources/mgmt/2019-03-01/resources/models.go
index 194aabca5a8a..5a354d589684 100644
--- a/services/resources/mgmt/2019-03-01/resources/models.go
+++ b/services/resources/mgmt/2019-03-01/resources/models.go
@@ -106,6 +106,11 @@ type BasicDependency struct {
ResourceName *string `json:"resourceName,omitempty"`
}
+// CloudError an error response for a resource management request.
+type CloudError struct {
+ Error *ErrorResponse `json:"error,omitempty"`
+}
+
// CreateOrUpdateByIDFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type CreateOrUpdateByIDFuture struct {
@@ -747,6 +752,28 @@ type DeploymentValidateResult struct {
Properties *DeploymentPropertiesExtended `json:"properties,omitempty"`
}
+// ErrorAdditionalInfo the resource management error additional info.
+type ErrorAdditionalInfo struct {
+ // Type - READ-ONLY; The additional info type.
+ Type *string `json:"type,omitempty"`
+ // Info - READ-ONLY; The additional info.
+ Info interface{} `json:"info,omitempty"`
+}
+
+// ErrorResponse the resource management error response.
+type ErrorResponse struct {
+ // Code - READ-ONLY; The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; The error message.
+ Message *string `json:"message,omitempty"`
+ // Target - READ-ONLY; The error target.
+ Target *string `json:"target,omitempty"`
+ // Details - READ-ONLY; The error details.
+ Details *[]ErrorResponse `json:"details,omitempty"`
+ // AdditionalInfo - READ-ONLY; The error additional info.
+ AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"`
+}
+
// ExportTemplateRequest export resource group template request parameters.
type ExportTemplateRequest struct {
// ResourcesProperty - The IDs of the resources to filter the export by. To export all resources, supply an array with single entry '*'.
@@ -1963,6 +1990,16 @@ type TargetResource struct {
ResourceType *string `json:"resourceType,omitempty"`
}
+// TemplateHashResult result of the request to calculate template hash. It contains a string of minified
+// template and its hash.
+type TemplateHashResult struct {
+ autorest.Response `json:"-"`
+ // MinifiedTemplate - The minified template string.
+ MinifiedTemplate *string `json:"minifiedTemplate,omitempty"`
+ // TemplateHash - The template hash.
+ TemplateHash *string `json:"templateHash,omitempty"`
+}
+
// TemplateLink entity representing the reference to the template.
type TemplateLink struct {
// URI - The URI of the template to deploy.
diff --git a/services/resources/mgmt/2019-03-01/resources/resourcesapi/interfaces.go b/services/resources/mgmt/2019-03-01/resources/resourcesapi/interfaces.go
index 34d5c9072064..646618e0916d 100644
--- a/services/resources/mgmt/2019-03-01/resources/resourcesapi/interfaces.go
+++ b/services/resources/mgmt/2019-03-01/resources/resourcesapi/interfaces.go
@@ -32,6 +32,7 @@ var _ OperationsClientAPI = (*resources.OperationsClient)(nil)
// DeploymentsClientAPI contains the set of methods on the DeploymentsClient type.
type DeploymentsClientAPI interface {
+ CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result resources.TemplateHashResult, err error)
Cancel(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CancelAtSubscriptionScope(ctx context.Context, deploymentName string) (result autorest.Response, err error)
CheckExistence(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
diff --git a/services/resources/mgmt/2019-05-01/resources/deployments.go b/services/resources/mgmt/2019-05-01/resources/deployments.go
index 02698bcdd9e0..5046f3d6d829 100644
--- a/services/resources/mgmt/2019-05-01/resources/deployments.go
+++ b/services/resources/mgmt/2019-05-01/resources/deployments.go
@@ -41,6 +41,78 @@ func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) Depl
return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
+// CalculateTemplateHash calculates the hash of the given template.
+// Parameters:
+// templateParameter - the template provided to calculate hash.
+func (client DeploymentsClient) CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result TemplateHashResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CalculateTemplateHash")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CalculateTemplateHashPreparer(ctx, templateParameter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CalculateTemplateHashSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CalculateTemplateHashResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CalculateTemplateHash", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CalculateTemplateHashPreparer prepares the CalculateTemplateHash request.
+func (client DeploymentsClient) CalculateTemplateHashPreparer(ctx context.Context, templateParameter interface{}) (*http.Request, error) {
+ const APIVersion = "2019-05-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Resources/calculateTemplateHash"),
+ autorest.WithJSON(templateParameter),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CalculateTemplateHashSender sends the CalculateTemplateHash request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) CalculateTemplateHashSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CalculateTemplateHashResponder handles the response to the CalculateTemplateHash request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) CalculateTemplateHashResponder(resp *http.Response) (result TemplateHashResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// Cancel you can cancel a deployment only if the provisioningState is Accepted or Running. After the deployment is
// canceled, the provisioningState is set to Canceled. Canceling a template deployment stops the currently running
// template deployment and leaves the resource group partially deployed.
diff --git a/services/resources/mgmt/2019-05-01/resources/models.go b/services/resources/mgmt/2019-05-01/resources/models.go
index b7b48feace97..89c032785eab 100644
--- a/services/resources/mgmt/2019-05-01/resources/models.go
+++ b/services/resources/mgmt/2019-05-01/resources/models.go
@@ -106,6 +106,11 @@ type BasicDependency struct {
ResourceName *string `json:"resourceName,omitempty"`
}
+// CloudError an error response for a resource management request.
+type CloudError struct {
+ Error *ErrorResponse `json:"error,omitempty"`
+}
+
// CreateOrUpdateByIDFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type CreateOrUpdateByIDFuture struct {
@@ -803,6 +808,28 @@ type DeploymentValidateResult struct {
Properties *DeploymentPropertiesExtended `json:"properties,omitempty"`
}
+// ErrorAdditionalInfo the resource management error additional info.
+type ErrorAdditionalInfo struct {
+ // Type - READ-ONLY; The additional info type.
+ Type *string `json:"type,omitempty"`
+ // Info - READ-ONLY; The additional info.
+ Info interface{} `json:"info,omitempty"`
+}
+
+// ErrorResponse the resource management error response.
+type ErrorResponse struct {
+ // Code - READ-ONLY; The error code.
+ Code *string `json:"code,omitempty"`
+ // Message - READ-ONLY; The error message.
+ Message *string `json:"message,omitempty"`
+ // Target - READ-ONLY; The error target.
+ Target *string `json:"target,omitempty"`
+ // Details - READ-ONLY; The error details.
+ Details *[]ErrorResponse `json:"details,omitempty"`
+ // AdditionalInfo - READ-ONLY; The error additional info.
+ AdditionalInfo *[]ErrorAdditionalInfo `json:"additionalInfo,omitempty"`
+}
+
// ExportTemplateRequest export resource group template request parameters.
type ExportTemplateRequest struct {
// ResourcesProperty - The IDs of the resources to filter the export by. To export all resources, supply an array with single entry '*'.
@@ -2021,6 +2048,16 @@ type TargetResource struct {
ResourceType *string `json:"resourceType,omitempty"`
}
+// TemplateHashResult result of the request to calculate template hash. It contains a string of minified
+// template and its hash.
+type TemplateHashResult struct {
+ autorest.Response `json:"-"`
+ // MinifiedTemplate - The minified template string.
+ MinifiedTemplate *string `json:"minifiedTemplate,omitempty"`
+ // TemplateHash - The template hash.
+ TemplateHash *string `json:"templateHash,omitempty"`
+}
+
// TemplateLink entity representing the reference to the template.
type TemplateLink struct {
// URI - The URI of the template to deploy.
diff --git a/services/resources/mgmt/2019-05-01/resources/resourcesapi/interfaces.go b/services/resources/mgmt/2019-05-01/resources/resourcesapi/interfaces.go
index 76208f6f3c83..86bfcb54ac54 100644
--- a/services/resources/mgmt/2019-05-01/resources/resourcesapi/interfaces.go
+++ b/services/resources/mgmt/2019-05-01/resources/resourcesapi/interfaces.go
@@ -32,6 +32,7 @@ var _ OperationsClientAPI = (*resources.OperationsClient)(nil)
// DeploymentsClientAPI contains the set of methods on the DeploymentsClient type.
type DeploymentsClientAPI interface {
+ CalculateTemplateHash(ctx context.Context, templateParameter interface{}) (result resources.TemplateHashResult, err error)
Cancel(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error)
CancelAtManagementGroupScope(ctx context.Context, groupID string, deploymentName string) (result autorest.Response, err error)
CancelAtSubscriptionScope(ctx context.Context, deploymentName string) (result autorest.Response, err error)
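Because CalculateTemplateHash is now part of DeploymentsClientAPI, code written against the interface picks up the new method, and hand-written fakes must implement it to keep satisfying the interface. A minimal sketch of consuming the interface (hashTemplate and the package name are invented for illustration):

```go
package deploytools

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources"
	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/resourcesapi"
)

// Compile-time check that the concrete client still satisfies the widened interface.
var _ resourcesapi.DeploymentsClientAPI = (*resources.DeploymentsClient)(nil)

// hashTemplate depends only on the interface, so tests can inject a fake
// DeploymentsClientAPI instead of a real client.
func hashTemplate(ctx context.Context, c resourcesapi.DeploymentsClientAPI, template interface{}) (string, error) {
	res, err := c.CalculateTemplateHash(ctx, template)
	if err != nil {
		return "", err
	}
	if res.TemplateHash == nil {
		return "", nil
	}
	return *res.TemplateHash, nil
}
```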
diff --git a/services/resources/mgmt/2019-06-01/policy/assignments.go b/services/resources/mgmt/2019-06-01/policy/assignments.go
new file mode 100644
index 000000000000..0b8cb46951a2
--- /dev/null
+++ b/services/resources/mgmt/2019-06-01/policy/assignments.go
@@ -0,0 +1,966 @@
+package policy
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// AssignmentsClient is the client for policy assignments. To manage and control access to your resources, you can
+// define customized policies and assign them at a scope.
+type AssignmentsClient struct {
+ BaseClient
+}
+
+// NewAssignmentsClient creates an instance of the AssignmentsClient client.
+func NewAssignmentsClient(subscriptionID string) AssignmentsClient {
+ return NewAssignmentsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewAssignmentsClientWithBaseURI creates an instance of the AssignmentsClient client.
+func NewAssignmentsClientWithBaseURI(baseURI string, subscriptionID string) AssignmentsClient {
+ return AssignmentsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create this operation creates or updates a policy assignment with the given scope and name. Policy assignments apply
+// to all resources contained within their scope. For example, when you assign a policy at resource group scope, that
+// policy applies to all resources in the group.
+// Parameters:
+// scope - the scope of the policy assignment. Valid scopes are: management group (format:
+// '/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
+// '/subscriptions/{subscriptionId}'), resource group (format:
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}'), or resource (format:
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}').
+// policyAssignmentName - the name of the policy assignment.
+// parameters - parameters for the policy assignment.
+func (client AssignmentsClient) Create(ctx context.Context, scope string, policyAssignmentName string, parameters Assignment) (result Assignment, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.Sku.Name", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("policy.AssignmentsClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, scope, policyAssignmentName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Create", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client AssignmentsClient) CreatePreparer(ctx context.Context, scope string, policyAssignmentName string, parameters Assignment) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyAssignmentName": autorest.Encode("path", policyAssignmentName),
+ "scope": scope,
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ parameters.ID = nil
+ parameters.Type = nil
+ parameters.Name = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client AssignmentsClient) CreateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client AssignmentsClient) CreateResponder(resp *http.Response) (result Assignment, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// CreateByID this operation creates or updates the policy assignment with the given ID. Policy assignments made on a
+// scope apply to all resources contained in that scope. For example, when you assign a policy to a resource group, that
+// policy applies to all resources in the group. Policy assignment IDs have this format:
+// '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'. Valid scopes are: management
+// group (format: '/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
+// '/subscriptions/{subscriptionId}'), resource group (format:
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}'), or resource (format:
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}').
+// Parameters:
+// policyAssignmentID - the ID of the policy assignment to create. Use the format
+// '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'.
+// parameters - parameters for policy assignment.
+func (client AssignmentsClient) CreateByID(ctx context.Context, policyAssignmentID string, parameters Assignment) (result Assignment, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.CreateByID")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.Sku.Name", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("policy.AssignmentsClient", "CreateByID", err.Error())
+ }
+
+ req, err := client.CreateByIDPreparer(ctx, policyAssignmentID, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "CreateByID", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateByIDSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "CreateByID", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateByIDResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "CreateByID", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateByIDPreparer prepares the CreateByID request.
+func (client AssignmentsClient) CreateByIDPreparer(ctx context.Context, policyAssignmentID string, parameters Assignment) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyAssignmentId": policyAssignmentID,
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ parameters.ID = nil
+ parameters.Type = nil
+ parameters.Name = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/{policyAssignmentId}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateByIDSender sends the CreateByID request. The method will close the
+// http.Response Body if it receives an error.
+func (client AssignmentsClient) CreateByIDSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateByIDResponder handles the response to the CreateByID request. The method always
+// closes the http.Response Body.
+func (client AssignmentsClient) CreateByIDResponder(resp *http.Response) (result Assignment, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete this operation deletes a policy assignment, given its name and the scope it was created in. The scope of a
+// policy assignment is the part of its ID preceding
+// '/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'.
+// Parameters:
+// scope - the scope of the policy assignment. Valid scopes are: management group (format:
+// '/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
+// '/subscriptions/{subscriptionId}'), resource group (format:
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}'), or resource (format:
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}').
+// policyAssignmentName - the name of the policy assignment to delete.
+func (client AssignmentsClient) Delete(ctx context.Context, scope string, policyAssignmentName string) (result Assignment, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, scope, policyAssignmentName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client AssignmentsClient) DeletePreparer(ctx context.Context, scope string, policyAssignmentName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyAssignmentName": autorest.Encode("path", policyAssignmentName),
+ "scope": scope,
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client AssignmentsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client AssignmentsClient) DeleteResponder(resp *http.Response) (result Assignment, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// DeleteByID this operation deletes the policy assignment with the given ID. Policy assignment IDs have this format:
+// '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'. Valid formats for {scope} are:
+// '/providers/Microsoft.Management/managementGroups/{managementGroup}' (management group),
+// '/subscriptions/{subscriptionId}' (subscription),
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' (resource group), or
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}'
+// (resource).
+// Parameters:
+// policyAssignmentID - the ID of the policy assignment to delete. Use the format
+// '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'.
+func (client AssignmentsClient) DeleteByID(ctx context.Context, policyAssignmentID string) (result Assignment, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.DeleteByID")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeleteByIDPreparer(ctx, policyAssignmentID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "DeleteByID", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteByIDSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "DeleteByID", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteByIDResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "DeleteByID", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeleteByIDPreparer prepares the DeleteByID request.
+func (client AssignmentsClient) DeleteByIDPreparer(ctx context.Context, policyAssignmentID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyAssignmentId": policyAssignmentID,
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/{policyAssignmentId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteByIDSender sends the DeleteByID request. The method will close the
+// http.Response Body if it receives an error.
+func (client AssignmentsClient) DeleteByIDSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteByIDResponder handles the response to the DeleteByID request. The method always
+// closes the http.Response Body.
+func (client AssignmentsClient) DeleteByIDResponder(resp *http.Response) (result Assignment, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Get this operation retrieves a single policy assignment, given its name and the scope it was created at.
+// Parameters:
+// scope - the scope of the policy assignment. Valid scopes are: management group (format:
+// '/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
+// '/subscriptions/{subscriptionId}'), resource group (format:
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}'), or resource (format:
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}').
+// policyAssignmentName - the name of the policy assignment to get.
+func (client AssignmentsClient) Get(ctx context.Context, scope string, policyAssignmentName string) (result Assignment, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, scope, policyAssignmentName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client AssignmentsClient) GetPreparer(ctx context.Context, scope string, policyAssignmentName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyAssignmentName": autorest.Encode("path", policyAssignmentName),
+ "scope": scope,
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client AssignmentsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client AssignmentsClient) GetResponder(resp *http.Response) (result Assignment, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
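A hedged usage sketch for Get (scope and assignment name are placeholders; authorizer setup is omitted):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
)

func main() {
	client := policy.NewAssignmentsClient("<subscription id>")
	// client.Authorizer = authorizer // assumed to be configured by the caller

	// Any of the scope formats documented above work; this one targets a resource group.
	scope := "/subscriptions/<subscription id>/resourceGroups/<resource group>"
	result, err := client.Get(context.Background(), scope, "<assignment name>")
	if err != nil {
		log.Fatal(err)
	}
	if result.ID != nil {
		fmt.Println("policy assignment:", *result.ID)
	}
}
```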
+// GetByID the operation retrieves the policy assignment with the given ID. Policy assignment IDs have this format:
+// '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'. Valid scopes are: management
+// group (format: '/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
+// '/subscriptions/{subscriptionId}'), resource group (format:
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}'), or resource (format:
+// '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}').
+// Parameters:
+// policyAssignmentID - the ID of the policy assignment to get. Use the format
+// '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'.
+func (client AssignmentsClient) GetByID(ctx context.Context, policyAssignmentID string) (result Assignment, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.GetByID")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetByIDPreparer(ctx, policyAssignmentID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "GetByID", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetByIDSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "GetByID", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetByIDResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "GetByID", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetByIDPreparer prepares the GetByID request.
+func (client AssignmentsClient) GetByIDPreparer(ctx context.Context, policyAssignmentID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyAssignmentId": policyAssignmentID,
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/{policyAssignmentId}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetByIDSender sends the GetByID request. The method will close the
+// http.Response Body if it receives an error.
+func (client AssignmentsClient) GetByIDSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetByIDResponder handles the response to the GetByID request. The method always
+// closes the http.Response Body.
+func (client AssignmentsClient) GetByIDResponder(resp *http.Response) (result Assignment, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List this operation retrieves the list of all policy assignments associated with the given subscription that match
+// the optional given $filter. Valid values for $filter are: 'atScope()' or 'policyDefinitionId eq '{value}''. If
+// $filter is not provided, the unfiltered list includes all policy assignments associated with the subscription,
+// including those that apply directly or from management groups that contain the given subscription, as well as any
+// applied to objects contained within the subscription. If $filter=atScope() is provided, the returned list includes
+// all policy assignments that apply to the subscription, which is everything in the unfiltered list except those
+// applied to objects contained within the subscription. If $filter=policyDefinitionId eq '{value}' is provided, the
+// returned list includes all policy assignments of the policy definition whose id is {value}.
+// Parameters:
+// filter - the filter to apply on the operation. Valid values for $filter are: 'atScope()' or
+// 'policyDefinitionId eq '{value}''. If $filter is not provided, no filtering is performed.
+func (client AssignmentsClient) List(ctx context.Context, filter string) (result AssignmentListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.List")
+ defer func() {
+ sc := -1
+ if result.alr.Response.Response != nil {
+ sc = result.alr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.alr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.alr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client AssignmentsClient) ListPreparer(ctx context.Context, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyAssignments", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client AssignmentsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client AssignmentsClient) ListResponder(resp *http.Response) (result AssignmentListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client AssignmentsClient) listNextResults(ctx context.Context, lastResults AssignmentListResult) (result AssignmentListResult, err error) {
+ req, err := lastResults.assignmentListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AssignmentsClient) ListComplete(ctx context.Context, filter string) (result AssignmentListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, filter)
+ return
+}
+
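For paging, ListComplete returns an iterator that transparently follows nextLink pages. A sketch of consuming it, assuming the standard generated iterator surface (NotDone, Value, NextWithContext); placeholders and authorizer setup are as in the earlier sketches:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
)

func main() {
	client := policy.NewAssignmentsClient("<subscription id>")
	// client.Authorizer = authorizer // assumed to be configured by the caller

	ctx := context.Background()
	// "atScope()" is one of the documented $filter values; pass "" for no filtering.
	iter, err := client.ListComplete(ctx, "atScope()")
	if err != nil {
		log.Fatal(err)
	}
	for iter.NotDone() {
		if a := iter.Value(); a.Name != nil {
			fmt.Println(*a.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```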
+// ListForResource this operation retrieves the list of all policy assignments associated with the specified resource
+// in the given resource group and subscription that match the optional given $filter. Valid values for $filter are:
+// 'atScope()' or 'policyDefinitionId eq '{value}''. If $filter is not provided, the unfiltered list includes all
+// policy assignments associated with the resource, including those that apply directly or from all containing scopes,
+// as well as any applied to resources contained within the resource. If $filter=atScope() is provided, the returned
+// list includes all policy assignments that apply to the resource, which is everything in the unfiltered list except
+// those applied to resources contained within the resource. If $filter=policyDefinitionId eq '{value}' is provided,
+// the returned list includes all policy assignments of the policy definition whose id is {value} that apply to the
+// resource. Three parameters plus the resource name are used to identify a specific resource. If the resource is not
+// part of a parent resource (the more common case), the parent resource path should not be provided (or provided as
+// ''). For example a web app could be specified as ({resourceProviderNamespace} == 'Microsoft.Web',
+// {parentResourcePath} == '', {resourceType} == 'sites', {resourceName} == 'MyWebApp'). If the resource is part of a
+// parent resource, then all parameters should be provided. For example a virtual machine DNS name could be specified
+// as ({resourceProviderNamespace} == 'Microsoft.Compute', {parentResourcePath} == 'virtualMachines/MyVirtualMachine',
+// {resourceType} == 'domainNames', {resourceName} == 'MyComputerName'). A convenient alternative to providing the
+// namespace and type name separately is to provide both in the {resourceType} parameter, format:
+// ({resourceProviderNamespace} == '', {parentResourcePath} == '', {resourceType} == 'Microsoft.Web/sites',
+// {resourceName} == 'MyWebApp').
+// Parameters:
+// resourceGroupName - the name of the resource group containing the resource.
+// resourceProviderNamespace - the namespace of the resource provider. For example, the namespace of a virtual
+// machine is Microsoft.Compute (from Microsoft.Compute/virtualMachines)
+// parentResourcePath - the parent resource path. Use empty string if there is none.
+// resourceType - the resource type name. For example the type name of a web app is 'sites' (from
+// Microsoft.Web/sites).
+// resourceName - the name of the resource.
+// filter - the filter to apply on the operation. Valid values for $filter are: 'atScope()' or
+// 'policyDefinitionId eq '{value}''. If $filter is not provided, no filtering is performed.
+func (client AssignmentsClient) ListForResource(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (result AssignmentListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.ListForResource")
+ defer func() {
+ sc := -1
+ if result.alr.Response.Response != nil {
+ sc = result.alr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("policy.AssignmentsClient", "ListForResource", err.Error())
+ }
+
+ result.fn = client.listForResourceNextResults
+ req, err := client.ListForResourcePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResource", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListForResourceSender(req)
+ if err != nil {
+ result.alr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResource", resp, "Failure sending request")
+ return
+ }
+
+ result.alr, err = client.ListForResourceResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResource", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListForResourcePreparer prepares the ListForResource request.
+func (client AssignmentsClient) ListForResourcePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "parentResourcePath": parentResourcePath,
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "resourceName": autorest.Encode("path", resourceName),
+ "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace),
+ "resourceType": resourceType,
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyAssignments", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListForResourceSender sends the ListForResource request. The method will close the
+// http.Response Body if it receives an error.
+func (client AssignmentsClient) ListForResourceSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListForResourceResponder handles the response to the ListForResource request. The method always
+// closes the http.Response Body.
+func (client AssignmentsClient) ListForResourceResponder(resp *http.Response) (result AssignmentListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listForResourceNextResults retrieves the next set of results, if any.
+func (client AssignmentsClient) listForResourceNextResults(ctx context.Context, lastResults AssignmentListResult) (result AssignmentListResult, err error) {
+ req, err := lastResults.assignmentListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "listForResourceNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListForResourceSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "listForResourceNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListForResourceResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "listForResourceNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListForResourceComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AssignmentsClient) ListForResourceComplete(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (result AssignmentListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.ListForResource")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListForResource(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, filter)
+ return
+}
+
+// ListForResourceGroup this operation retrieves the list of all policy assignments associated with the given resource
+// group in the given subscription that match the optional given $filter. Valid values for $filter are: 'atScope()' or
+// 'policyDefinitionId eq '{value}''. If $filter is not provided, the unfiltered list includes all policy assignments
+// associated with the resource group, including those that apply directly or apply from containing scopes, as well as
+// any applied to resources contained within the resource group. If $filter=atScope() is provided, the returned list
+// includes all policy assignments that apply to the resource group, which is everything in the unfiltered list except
+// those applied to resources contained within the resource group. If $filter=policyDefinitionId eq '{value}' is
+// provided, the returned list includes all policy assignments of the policy definition whose id is {value} that apply
+// to the resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group that contains policy assignments.
+// filter - the filter to apply on the operation. Valid values for $filter are: 'atScope()' or
+// 'policyDefinitionId eq '{value}''. If $filter is not provided, no filtering is performed.
+func (client AssignmentsClient) ListForResourceGroup(ctx context.Context, resourceGroupName string, filter string) (result AssignmentListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.ListForResourceGroup")
+ defer func() {
+ sc := -1
+ if result.alr.Response.Response != nil {
+ sc = result.alr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("policy.AssignmentsClient", "ListForResourceGroup", err.Error())
+ }
+
+ result.fn = client.listForResourceGroupNextResults
+ req, err := client.ListForResourceGroupPreparer(ctx, resourceGroupName, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListForResourceGroupSender(req)
+ if err != nil {
+ result.alr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.alr, err = client.ListForResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListForResourceGroupPreparer prepares the ListForResourceGroup request.
+func (client AssignmentsClient) ListForResourceGroupPreparer(ctx context.Context, resourceGroupName string, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = filter
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListForResourceGroupSender sends the ListForResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client AssignmentsClient) ListForResourceGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListForResourceGroupResponder handles the response to the ListForResourceGroup request. The method always
+// closes the http.Response Body.
+func (client AssignmentsClient) ListForResourceGroupResponder(resp *http.Response) (result AssignmentListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listForResourceGroupNextResults retrieves the next set of results, if any.
+func (client AssignmentsClient) listForResourceGroupNextResults(ctx context.Context, lastResults AssignmentListResult) (result AssignmentListResult, err error) {
+ req, err := lastResults.assignmentListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "listForResourceGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListForResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "listForResourceGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListForResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "listForResourceGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListForResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client AssignmentsClient) ListForResourceGroupComplete(ctx context.Context, resourceGroupName string, filter string) (result AssignmentListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentsClient.ListForResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListForResourceGroup(ctx, resourceGroupName, filter)
+ return
+}
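A minimal, illustrative sketch of how the pager above is normally consumed; it is not part of the generated code. It assumes the package's generated NewAssignmentsClient constructor (declared earlier in assignments.go) and an authorizer built by go-autorest's azure/auth helpers; the subscription ID, resource group and definition name are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Reads AZURE_* environment variables and builds a bearer-token authorizer.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := policy.NewAssignmentsClient("<subscription id>")
	client.Authorizer = authorizer

	// $filter follows the syntax documented on ListForResourceGroup above:
	// only assignments created from one policy definition are returned.
	filter := "policyDefinitionId eq '/providers/Microsoft.Authorization/policyDefinitions/<definition name>'"

	iter, err := client.ListForResourceGroupComplete(context.Background(), "my-rg", filter)
	if err != nil {
		log.Fatal(err)
	}
	// The Complete iterator crosses page boundaries automatically.
	for iter.NotDone() {
		if a := iter.Value(); a.Name != nil {
			fmt.Println(*a.Name)
		}
		if err := iter.NextWithContext(context.Background()); err != nil {
			log.Fatal(err)
		}
	}
}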
diff --git a/services/resources/mgmt/2019-06-01/policy/client.go b/services/resources/mgmt/2019-06-01/policy/client.go
new file mode 100644
index 000000000000..a60923b29250
--- /dev/null
+++ b/services/resources/mgmt/2019-06-01/policy/client.go
@@ -0,0 +1,51 @@
+// Package policy implements the Azure ARM Policy service API version 2019-06-01.
+//
+// To manage and control access to your resources, you can define customized policies and assign them at a scope.
+package policy
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Policy
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Policy.
+type BaseClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+ return BaseClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
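New wires clients to the public-cloud endpoint via DefaultBaseURI; NewWithBaseURI (and the per-client *WithBaseURI constructors such as NewDefinitionsClientWithBaseURI below) is the hook for sovereign clouds or Azure Stack. A short sketch, assuming go-autorest's azure.ChinaCloud environment definition and the environment-based authorizer; the subscription ID is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// Target Azure China's Resource Manager endpoint instead of the default
	// https://management.azure.com baked into DefaultBaseURI.
	defs := policy.NewDefinitionsClientWithBaseURI(azure.ChinaCloud.ResourceManagerEndpoint, "<subscription id>")
	defs.Authorizer = authorizer

	fmt.Println(defs.BaseURI) // the embedded BaseClient carries the override
}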
diff --git a/services/resources/mgmt/2019-06-01/policy/definitions.go b/services/resources/mgmt/2019-06-01/policy/definitions.go
new file mode 100644
index 000000000000..0a697e0252da
--- /dev/null
+++ b/services/resources/mgmt/2019-06-01/policy/definitions.go
@@ -0,0 +1,910 @@
+package policy
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// DefinitionsClient is the client for the Definitions methods of the Policy service. To manage and control access
+// to your resources, you can define customized policies and assign them at a scope.
+type DefinitionsClient struct {
+ BaseClient
+}
+
+// NewDefinitionsClient creates an instance of the DefinitionsClient client.
+func NewDefinitionsClient(subscriptionID string) DefinitionsClient {
+ return NewDefinitionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewDefinitionsClientWithBaseURI creates an instance of the DefinitionsClient client.
+func NewDefinitionsClientWithBaseURI(baseURI string, subscriptionID string) DefinitionsClient {
+ return DefinitionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate this operation creates or updates a policy definition in the given subscription with the given name.
+// Parameters:
+// policyDefinitionName - the name of the policy definition to create.
+// parameters - the policy definition properties.
+func (client DefinitionsClient) CreateOrUpdate(ctx context.Context, policyDefinitionName string, parameters Definition) (result Definition, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdatePreparer(ctx, policyDefinitionName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client DefinitionsClient) CreateOrUpdatePreparer(ctx context.Context, policyDefinitionName string, parameters Definition) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyDefinitionName": autorest.Encode("path", policyDefinitionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ parameters.ID = nil
+ parameters.Name = nil
+ parameters.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client DefinitionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client DefinitionsClient) CreateOrUpdateResponder(resp *http.Response) (result Definition, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
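For reference, a hedged sketch of driving CreateOrUpdate; it is illustrative only and assumes the DefinitionProperties fields (DisplayName, Mode, PolicyRule) declared later in models.go, plus the environment-based authorizer. The rule payload and names are placeholders. Note the preparer above already clears the READ-ONLY ID/Name/Type fields, so callers only populate the properties.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client := policy.NewDefinitionsClient("<subscription id>")
	client.Authorizer = authorizer

	// A policy rule is arbitrary JSON; the SDK models it as interface{}.
	var rule interface{}
	if err := json.Unmarshal([]byte(`{
		"if":   {"field": "location", "notIn": ["westeurope", "northeurope"]},
		"then": {"effect": "deny"}
	}`), &rule); err != nil {
		log.Fatal(err)
	}

	def := policy.Definition{
		DefinitionProperties: &policy.DefinitionProperties{
			DisplayName: to.StringPtr("Allowed locations (EU only)"),
			Mode:        to.StringPtr("All"),
			PolicyRule:  rule,
		},
	}

	created, err := client.CreateOrUpdate(context.Background(), "allowed-locations-eu", def)
	if err != nil {
		log.Fatal(err)
	}
	if created.ID != nil {
		fmt.Println(*created.ID)
	}
}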
+
+// CreateOrUpdateAtManagementGroup this operation creates or updates a policy definition in the given management group
+// with the given name.
+// Parameters:
+// policyDefinitionName - the name of the policy definition to create.
+// parameters - the policy definition properties.
+// managementGroupID - the ID of the management group.
+func (client DefinitionsClient) CreateOrUpdateAtManagementGroup(ctx context.Context, policyDefinitionName string, parameters Definition, managementGroupID string) (result Definition, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.CreateOrUpdateAtManagementGroup")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.CreateOrUpdateAtManagementGroupPreparer(ctx, policyDefinitionName, parameters, managementGroupID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "CreateOrUpdateAtManagementGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateAtManagementGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "CreateOrUpdateAtManagementGroup", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateAtManagementGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "CreateOrUpdateAtManagementGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdateAtManagementGroupPreparer prepares the CreateOrUpdateAtManagementGroup request.
+func (client DefinitionsClient) CreateOrUpdateAtManagementGroupPreparer(ctx context.Context, policyDefinitionName string, parameters Definition, managementGroupID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managementGroupId": autorest.Encode("path", managementGroupID),
+ "policyDefinitionName": autorest.Encode("path", policyDefinitionName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ parameters.ID = nil
+ parameters.Name = nil
+ parameters.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateAtManagementGroupSender sends the CreateOrUpdateAtManagementGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client DefinitionsClient) CreateOrUpdateAtManagementGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateAtManagementGroupResponder handles the response to the CreateOrUpdateAtManagementGroup request. The method always
+// closes the http.Response Body.
+func (client DefinitionsClient) CreateOrUpdateAtManagementGroupResponder(resp *http.Response) (result Definition, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete this operation deletes the policy definition in the given subscription with the given name.
+// Parameters:
+// policyDefinitionName - the name of the policy definition to delete.
+func (client DefinitionsClient) Delete(ctx context.Context, policyDefinitionName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, policyDefinitionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client DefinitionsClient) DeletePreparer(ctx context.Context, policyDefinitionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyDefinitionName": autorest.Encode("path", policyDefinitionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client DefinitionsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client DefinitionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// DeleteAtManagementGroup this operation deletes the policy definition in the given management group with the given
+// name.
+// Parameters:
+// policyDefinitionName - the name of the policy definition to delete.
+// managementGroupID - the ID of the management group.
+func (client DefinitionsClient) DeleteAtManagementGroup(ctx context.Context, policyDefinitionName string, managementGroupID string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.DeleteAtManagementGroup")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeleteAtManagementGroupPreparer(ctx, policyDefinitionName, managementGroupID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "DeleteAtManagementGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteAtManagementGroupSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "DeleteAtManagementGroup", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteAtManagementGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "DeleteAtManagementGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeleteAtManagementGroupPreparer prepares the DeleteAtManagementGroup request.
+func (client DefinitionsClient) DeleteAtManagementGroupPreparer(ctx context.Context, policyDefinitionName string, managementGroupID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managementGroupId": autorest.Encode("path", managementGroupID),
+ "policyDefinitionName": autorest.Encode("path", policyDefinitionName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteAtManagementGroupSender sends the DeleteAtManagementGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client DefinitionsClient) DeleteAtManagementGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteAtManagementGroupResponder handles the response to the DeleteAtManagementGroup request. The method always
+// closes the http.Response Body.
+func (client DefinitionsClient) DeleteAtManagementGroupResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get this operation retrieves the policy definition in the given subscription with the given name.
+// Parameters:
+// policyDefinitionName - the name of the policy definition to get.
+func (client DefinitionsClient) Get(ctx context.Context, policyDefinitionName string) (result Definition, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, policyDefinitionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client DefinitionsClient) GetPreparer(ctx context.Context, policyDefinitionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyDefinitionName": autorest.Encode("path", policyDefinitionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client DefinitionsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client DefinitionsClient) GetResponder(resp *http.Response) (result Definition, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAtManagementGroup this operation retrieves the policy definition in the given management group with the given
+// name.
+// Parameters:
+// policyDefinitionName - the name of the policy definition to get.
+// managementGroupID - the ID of the management group.
+func (client DefinitionsClient) GetAtManagementGroup(ctx context.Context, policyDefinitionName string, managementGroupID string) (result Definition, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.GetAtManagementGroup")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAtManagementGroupPreparer(ctx, policyDefinitionName, managementGroupID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "GetAtManagementGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAtManagementGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "GetAtManagementGroup", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAtManagementGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "GetAtManagementGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAtManagementGroupPreparer prepares the GetAtManagementGroup request.
+func (client DefinitionsClient) GetAtManagementGroupPreparer(ctx context.Context, policyDefinitionName string, managementGroupID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managementGroupId": autorest.Encode("path", managementGroupID),
+ "policyDefinitionName": autorest.Encode("path", policyDefinitionName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAtManagementGroupSender sends the GetAtManagementGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client DefinitionsClient) GetAtManagementGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAtManagementGroupResponder handles the response to the GetAtManagementGroup request. The method always
+// closes the http.Response Body.
+func (client DefinitionsClient) GetAtManagementGroupResponder(resp *http.Response) (result Definition, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetBuiltIn this operation retrieves the built-in policy definition with the given name.
+// Parameters:
+// policyDefinitionName - the name of the built-in policy definition to get.
+func (client DefinitionsClient) GetBuiltIn(ctx context.Context, policyDefinitionName string) (result Definition, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.GetBuiltIn")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetBuiltInPreparer(ctx, policyDefinitionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "GetBuiltIn", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetBuiltInSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "GetBuiltIn", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetBuiltInResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "GetBuiltIn", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetBuiltInPreparer prepares the GetBuiltIn request.
+func (client DefinitionsClient) GetBuiltInPreparer(ctx context.Context, policyDefinitionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policyDefinitionName": autorest.Encode("path", policyDefinitionName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetBuiltInSender sends the GetBuiltIn request. The method will close the
+// http.Response Body if it receives an error.
+func (client DefinitionsClient) GetBuiltInSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetBuiltInResponder handles the response to the GetBuiltIn request. The method always
+// closes the http.Response Body.
+func (client DefinitionsClient) GetBuiltInResponder(resp *http.Response) (result Definition, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
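A small illustrative helper for the simple, non-paged getters; it is not generated code. It assumes the DisplayName field on DefinitionProperties (declared later in models.go); client construction is the same as in the earlier sketches, and the definition name is a placeholder since built-ins are addressed by GUID-style names.

package examples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
)

// PrintBuiltInDisplayName fetches one built-in policy definition by name via
// GetBuiltIn and prints its display name, if the service returned one.
func PrintBuiltInDisplayName(ctx context.Context, client policy.DefinitionsClient, name string) error {
	def, err := client.GetBuiltIn(ctx, name)
	if err != nil {
		return err
	}
	if def.DefinitionProperties != nil && def.DefinitionProperties.DisplayName != nil {
		fmt.Println(*def.DefinitionProperties.DisplayName)
	}
	return nil
}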
+
+// List this operation retrieves a list of all the policy definitions in a given subscription.
+func (client DefinitionsClient) List(ctx context.Context) (result DefinitionListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.List")
+ defer func() {
+ sc := -1
+ if result.dlr.Response.Response != nil {
+ sc = result.dlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.dlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.dlr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client DefinitionsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client DefinitionsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client DefinitionsClient) ListResponder(resp *http.Response) (result DefinitionListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client DefinitionsClient) listNextResults(ctx context.Context, lastResults DefinitionListResult) (result DefinitionListResult, err error) {
+ req, err := lastResults.definitionListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client DefinitionsClient) ListComplete(ctx context.Context) (result DefinitionListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
+
+// ListBuiltIn this operation retrieves a list of all the built-in policy definitions.
+func (client DefinitionsClient) ListBuiltIn(ctx context.Context) (result DefinitionListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.ListBuiltIn")
+ defer func() {
+ sc := -1
+ if result.dlr.Response.Response != nil {
+ sc = result.dlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listBuiltInNextResults
+ req, err := client.ListBuiltInPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "ListBuiltIn", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListBuiltInSender(req)
+ if err != nil {
+ result.dlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "ListBuiltIn", resp, "Failure sending request")
+ return
+ }
+
+ result.dlr, err = client.ListBuiltInResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "ListBuiltIn", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListBuiltInPreparer prepares the ListBuiltIn request.
+func (client DefinitionsClient) ListBuiltInPreparer(ctx context.Context) (*http.Request, error) {
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Authorization/policyDefinitions"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListBuiltInSender sends the ListBuiltIn request. The method will close the
+// http.Response Body if it receives an error.
+func (client DefinitionsClient) ListBuiltInSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListBuiltInResponder handles the response to the ListBuiltIn request. The method always
+// closes the http.Response Body.
+func (client DefinitionsClient) ListBuiltInResponder(resp *http.Response) (result DefinitionListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listBuiltInNextResults retrieves the next set of results, if any.
+func (client DefinitionsClient) listBuiltInNextResults(ctx context.Context, lastResults DefinitionListResult) (result DefinitionListResult, err error) {
+ req, err := lastResults.definitionListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "listBuiltInNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListBuiltInSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "listBuiltInNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListBuiltInResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "listBuiltInNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListBuiltInComplete enumerates all values, automatically crossing page boundaries as required.
+func (client DefinitionsClient) ListBuiltInComplete(ctx context.Context) (result DefinitionListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.ListBuiltIn")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListBuiltIn(ctx)
+ return
+}
+
+// ListByManagementGroup this operation retrieves a list of all the policy definitions in a given management group.
+// Parameters:
+// managementGroupID - the ID of the management group.
+func (client DefinitionsClient) ListByManagementGroup(ctx context.Context, managementGroupID string) (result DefinitionListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.ListByManagementGroup")
+ defer func() {
+ sc := -1
+ if result.dlr.Response.Response != nil {
+ sc = result.dlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByManagementGroupNextResults
+ req, err := client.ListByManagementGroupPreparer(ctx, managementGroupID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "ListByManagementGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByManagementGroupSender(req)
+ if err != nil {
+ result.dlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "ListByManagementGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.dlr, err = client.ListByManagementGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "ListByManagementGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByManagementGroupPreparer prepares the ListByManagementGroup request.
+func (client DefinitionsClient) ListByManagementGroupPreparer(ctx context.Context, managementGroupID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managementGroupId": autorest.Encode("path", managementGroupID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByManagementGroupSender sends the ListByManagementGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client DefinitionsClient) ListByManagementGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByManagementGroupResponder handles the response to the ListByManagementGroup request. The method always
+// closes the http.Response Body.
+func (client DefinitionsClient) ListByManagementGroupResponder(resp *http.Response) (result DefinitionListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByManagementGroupNextResults retrieves the next set of results, if any.
+func (client DefinitionsClient) listByManagementGroupNextResults(ctx context.Context, lastResults DefinitionListResult) (result DefinitionListResult, err error) {
+ req, err := lastResults.definitionListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "listByManagementGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByManagementGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "listByManagementGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByManagementGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "listByManagementGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByManagementGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client DefinitionsClient) ListByManagementGroupComplete(ctx context.Context, managementGroupID string) (result DefinitionListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionsClient.ListByManagementGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByManagementGroup(ctx, managementGroupID)
+ return
+}
diff --git a/services/resources/mgmt/2019-06-01/policy/models.go b/services/resources/mgmt/2019-06-01/policy/models.go
new file mode 100644
index 000000000000..d5cd8cd809e1
--- /dev/null
+++ b/services/resources/mgmt/2019-06-01/policy/models.go
@@ -0,0 +1,867 @@
+package policy
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "encoding/json"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
+
+// EnforcementMode enumerates the values for enforcement mode.
+type EnforcementMode string
+
+const (
+ // Default The policy effect is enforced during resource creation or update.
+ Default EnforcementMode = "Default"
+ // DoNotEnforce The policy effect is not enforced during resource creation or update.
+ DoNotEnforce EnforcementMode = "DoNotEnforce"
+)
+
+// PossibleEnforcementModeValues returns an array of possible values for the EnforcementMode const type.
+func PossibleEnforcementModeValues() []EnforcementMode {
+ return []EnforcementMode{Default, DoNotEnforce}
+}
+
+// ResourceIdentityType enumerates the values for resource identity type.
+type ResourceIdentityType string
+
+const (
+ // None ...
+ None ResourceIdentityType = "None"
+ // SystemAssigned ...
+ SystemAssigned ResourceIdentityType = "SystemAssigned"
+)
+
+// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type.
+func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
+ return []ResourceIdentityType{None, SystemAssigned}
+}
+
+// Type enumerates the values for type.
+type Type string
+
+const (
+ // BuiltIn ...
+ BuiltIn Type = "BuiltIn"
+ // Custom ...
+ Custom Type = "Custom"
+ // NotSpecified ...
+ NotSpecified Type = "NotSpecified"
+)
+
+// PossibleTypeValues returns an array of possible values for the Type const type.
+func PossibleTypeValues() []Type {
+ return []Type{BuiltIn, Custom, NotSpecified}
+}
+
+// Assignment the policy assignment.
+type Assignment struct {
+ autorest.Response `json:"-"`
+ // AssignmentProperties - Properties for the policy assignment.
+ *AssignmentProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; The ID of the policy assignment.
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; The type of the policy assignment.
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; The name of the policy assignment.
+ Name *string `json:"name,omitempty"`
+ // Sku - The policy sku. This property is optional, obsolete, and will be ignored.
+ Sku *Sku `json:"sku,omitempty"`
+ // Location - The location of the policy assignment. Only required when utilizing managed identity.
+ Location *string `json:"location,omitempty"`
+ // Identity - The managed identity associated with the policy assignment.
+ Identity *Identity `json:"identity,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Assignment.
+func (a Assignment) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if a.AssignmentProperties != nil {
+ objectMap["properties"] = a.AssignmentProperties
+ }
+ if a.Sku != nil {
+ objectMap["sku"] = a.Sku
+ }
+ if a.Location != nil {
+ objectMap["location"] = a.Location
+ }
+ if a.Identity != nil {
+ objectMap["identity"] = a.Identity
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Assignment struct.
+func (a *Assignment) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var assignmentProperties AssignmentProperties
+ err = json.Unmarshal(*v, &assignmentProperties)
+ if err != nil {
+ return err
+ }
+ a.AssignmentProperties = &assignmentProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ a.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ a.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ a.Name = &name
+ }
+ case "sku":
+ if v != nil {
+ var sku Sku
+ err = json.Unmarshal(*v, &sku)
+ if err != nil {
+ return err
+ }
+ a.Sku = &sku
+ }
+ case "location":
+ if v != nil {
+ var location string
+ err = json.Unmarshal(*v, &location)
+ if err != nil {
+ return err
+ }
+ a.Location = &location
+ }
+ case "identity":
+ if v != nil {
+ var identity Identity
+ err = json.Unmarshal(*v, &identity)
+ if err != nil {
+ return err
+ }
+ a.Identity = &identity
+ }
+ }
+ }
+
+ return nil
+}
+
+// AssignmentListResult list of policy assignments.
+type AssignmentListResult struct {
+ autorest.Response `json:"-"`
+ // Value - An array of policy assignments.
+ Value *[]Assignment `json:"value,omitempty"`
+ // NextLink - The URL to use for getting the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// AssignmentListResultIterator provides access to a complete listing of Assignment values.
+type AssignmentListResultIterator struct {
+ i int
+ page AssignmentListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *AssignmentListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *AssignmentListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter AssignmentListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter AssignmentListResultIterator) Response() AssignmentListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter AssignmentListResultIterator) Value() Assignment {
+ if !iter.page.NotDone() {
+ return Assignment{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the AssignmentListResultIterator type.
+func NewAssignmentListResultIterator(page AssignmentListResultPage) AssignmentListResultIterator {
+ return AssignmentListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (alr AssignmentListResult) IsEmpty() bool {
+ return alr.Value == nil || len(*alr.Value) == 0
+}
+
+// assignmentListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (alr AssignmentListResult) assignmentListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if alr.NextLink == nil || len(to.String(alr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(alr.NextLink)))
+}
+
+// AssignmentListResultPage contains a page of Assignment values.
+type AssignmentListResultPage struct {
+ fn func(context.Context, AssignmentListResult) (AssignmentListResult, error)
+ alr AssignmentListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *AssignmentListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AssignmentListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.alr)
+ if err != nil {
+ return err
+ }
+ page.alr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AssignmentListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page AssignmentListResultPage) NotDone() bool {
+ return !page.alr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page AssignmentListResultPage) Response() AssignmentListResult {
+ return page.alr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page AssignmentListResultPage) Values() []Assignment {
+ if page.alr.IsEmpty() {
+ return nil
+ }
+ return *page.alr.Value
+}
+
+// Creates a new instance of the AssignmentListResultPage type.
+func NewAssignmentListResultPage(getNextPage func(context.Context, AssignmentListResult) (AssignmentListResult, error)) AssignmentListResultPage {
+ return AssignmentListResultPage{fn: getNextPage}
+}
+
+// AssignmentProperties the policy assignment properties.
+type AssignmentProperties struct {
+ // DisplayName - The display name of the policy assignment.
+ DisplayName *string `json:"displayName,omitempty"`
+ // PolicyDefinitionID - The ID of the policy definition or policy set definition being assigned.
+ PolicyDefinitionID *string `json:"policyDefinitionId,omitempty"`
+ // Scope - The scope for the policy assignment.
+ Scope *string `json:"scope,omitempty"`
+ // NotScopes - The policy's excluded scopes.
+ NotScopes *[]string `json:"notScopes,omitempty"`
+ // Parameters - Required if a parameter is used in the policy rule.
+ Parameters interface{} `json:"parameters,omitempty"`
+ // Description - This message will be part of the response in case of a policy violation.
+ Description *string `json:"description,omitempty"`
+ // Metadata - The policy assignment metadata.
+ Metadata interface{} `json:"metadata,omitempty"`
+ // EnforcementMode - The policy assignment enforcement mode. Possible values are Default and DoNotEnforce. Possible values include: 'Default', 'DoNotEnforce'
+ EnforcementMode EnforcementMode `json:"enforcementMode,omitempty"`
+}
+
+// Definition the policy definition.
+type Definition struct {
+ autorest.Response `json:"-"`
+ // DefinitionProperties - The policy definition properties.
+ *DefinitionProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; The ID of the policy definition.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the policy definition.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource (Microsoft.Authorization/policyDefinitions).
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for Definition.
+func (d Definition) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if d.DefinitionProperties != nil {
+ objectMap["properties"] = d.DefinitionProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Definition struct.
+func (d *Definition) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var definitionProperties DefinitionProperties
+ err = json.Unmarshal(*v, &definitionProperties)
+ if err != nil {
+ return err
+ }
+ d.DefinitionProperties = &definitionProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ d.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ d.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ d.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// DefinitionListResult list of policy definitions.
+type DefinitionListResult struct {
+ autorest.Response `json:"-"`
+ // Value - An array of policy definitions.
+ Value *[]Definition `json:"value,omitempty"`
+ // NextLink - The URL to use for getting the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// DefinitionListResultIterator provides access to a complete listing of Definition values.
+type DefinitionListResultIterator struct {
+ i int
+ page DefinitionListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *DefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *DefinitionListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter DefinitionListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter DefinitionListResultIterator) Response() DefinitionListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter DefinitionListResultIterator) Value() Definition {
+ if !iter.page.NotDone() {
+ return Definition{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the DefinitionListResultIterator type.
+func NewDefinitionListResultIterator(page DefinitionListResultPage) DefinitionListResultIterator {
+ return DefinitionListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (dlr DefinitionListResult) IsEmpty() bool {
+ return dlr.Value == nil || len(*dlr.Value) == 0
+}
+
+// definitionListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (dlr DefinitionListResult) definitionListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if dlr.NextLink == nil || len(to.String(dlr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(dlr.NextLink)))
+}
+
+// DefinitionListResultPage contains a page of Definition values.
+type DefinitionListResultPage struct {
+ fn func(context.Context, DefinitionListResult) (DefinitionListResult, error)
+ dlr DefinitionListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *DefinitionListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DefinitionListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.dlr)
+ if err != nil {
+ return err
+ }
+ page.dlr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *DefinitionListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page DefinitionListResultPage) NotDone() bool {
+ return !page.dlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page DefinitionListResultPage) Response() DefinitionListResult {
+ return page.dlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page DefinitionListResultPage) Values() []Definition {
+ if page.dlr.IsEmpty() {
+ return nil
+ }
+ return *page.dlr.Value
+}
+
+// Creates a new instance of the DefinitionListResultPage type.
+func NewDefinitionListResultPage(getNextPage func(context.Context, DefinitionListResult) (DefinitionListResult, error)) DefinitionListResultPage {
+ return DefinitionListResultPage{fn: getNextPage}
+}
+
+// DefinitionProperties the policy definition properties.
+type DefinitionProperties struct {
+ // PolicyType - The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom. Possible values include: 'NotSpecified', 'BuiltIn', 'Custom'
+ PolicyType Type `json:"policyType,omitempty"`
+ // Mode - The policy definition mode. Some examples are All, Indexed, Microsoft.KeyVault.Data.
+ Mode *string `json:"mode,omitempty"`
+ // DisplayName - The display name of the policy definition.
+ DisplayName *string `json:"displayName,omitempty"`
+ // Description - The policy definition description.
+ Description *string `json:"description,omitempty"`
+ // PolicyRule - The policy rule.
+ PolicyRule interface{} `json:"policyRule,omitempty"`
+ // Metadata - The policy definition metadata.
+ Metadata interface{} `json:"metadata,omitempty"`
+ // Parameters - Required if a parameter is used in the policy rule.
+ Parameters interface{} `json:"parameters,omitempty"`
+}
+
+// DefinitionReference the policy definition reference.
+type DefinitionReference struct {
+ // PolicyDefinitionID - The ID of the policy definition or policy set definition.
+ PolicyDefinitionID *string `json:"policyDefinitionId,omitempty"`
+ // Parameters - Required if a parameter is used in the policy rule.
+ Parameters interface{} `json:"parameters,omitempty"`
+}
+
+// ErrorResponse error response indicates Azure Resource Manager is not able to process the incoming
+// request. The reason is provided in the error message.
+type ErrorResponse struct {
+ // HTTPStatus - HTTP status code.
+ HTTPStatus *string `json:"httpStatus,omitempty"`
+ // ErrorCode - Error code.
+ ErrorCode *string `json:"errorCode,omitempty"`
+ // ErrorMessage - Error message indicating why the operation failed.
+ ErrorMessage *string `json:"errorMessage,omitempty"`
+}
+
+// Identity identity for the resource.
+type Identity struct {
+ // PrincipalID - READ-ONLY; The principal ID of the resource identity.
+ PrincipalID *string `json:"principalId,omitempty"`
+ // TenantID - READ-ONLY; The tenant ID of the resource identity.
+ TenantID *string `json:"tenantId,omitempty"`
+ // Type - The identity type. Possible values include: 'SystemAssigned', 'None'
+ Type ResourceIdentityType `json:"type,omitempty"`
+}
+
+// SetDefinition the policy set definition.
+type SetDefinition struct {
+ autorest.Response `json:"-"`
+ // SetDefinitionProperties - The policy set definition properties.
+ *SetDefinitionProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; The ID of the policy set definition.
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the policy set definition.
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource (Microsoft.Authorization/policySetDefinitions).
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for SetDefinition.
+func (sd SetDefinition) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if sd.SetDefinitionProperties != nil {
+ objectMap["properties"] = sd.SetDefinitionProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for SetDefinition struct.
+func (sd *SetDefinition) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var setDefinitionProperties SetDefinitionProperties
+ err = json.Unmarshal(*v, &setDefinitionProperties)
+ if err != nil {
+ return err
+ }
+ sd.SetDefinitionProperties = &setDefinitionProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ sd.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ sd.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ sd.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// SetDefinitionListResult list of policy set definitions.
+type SetDefinitionListResult struct {
+ autorest.Response `json:"-"`
+ // Value - An array of policy set definitions.
+ Value *[]SetDefinition `json:"value,omitempty"`
+ // NextLink - The URL to use for getting the next set of results.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// SetDefinitionListResultIterator provides access to a complete listing of SetDefinition values.
+type SetDefinitionListResultIterator struct {
+ i int
+ page SetDefinitionListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *SetDefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *SetDefinitionListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter SetDefinitionListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter SetDefinitionListResultIterator) Response() SetDefinitionListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter SetDefinitionListResultIterator) Value() SetDefinition {
+ if !iter.page.NotDone() {
+ return SetDefinition{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the SetDefinitionListResultIterator type.
+func NewSetDefinitionListResultIterator(page SetDefinitionListResultPage) SetDefinitionListResultIterator {
+ return SetDefinitionListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (sdlr SetDefinitionListResult) IsEmpty() bool {
+ return sdlr.Value == nil || len(*sdlr.Value) == 0
+}
+
+// setDefinitionListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (sdlr SetDefinitionListResult) setDefinitionListResultPreparer(ctx context.Context) (*http.Request, error) {
+ if sdlr.NextLink == nil || len(to.String(sdlr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(sdlr.NextLink)))
+}
+
+// SetDefinitionListResultPage contains a page of SetDefinition values.
+type SetDefinitionListResultPage struct {
+ fn func(context.Context, SetDefinitionListResult) (SetDefinitionListResult, error)
+ sdlr SetDefinitionListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *SetDefinitionListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.sdlr)
+ if err != nil {
+ return err
+ }
+ page.sdlr = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *SetDefinitionListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page SetDefinitionListResultPage) NotDone() bool {
+ return !page.sdlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page SetDefinitionListResultPage) Response() SetDefinitionListResult {
+ return page.sdlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page SetDefinitionListResultPage) Values() []SetDefinition {
+ if page.sdlr.IsEmpty() {
+ return nil
+ }
+ return *page.sdlr.Value
+}
+
+// Creates a new instance of the SetDefinitionListResultPage type.
+func NewSetDefinitionListResultPage(getNextPage func(context.Context, SetDefinitionListResult) (SetDefinitionListResult, error)) SetDefinitionListResultPage {
+ return SetDefinitionListResultPage{fn: getNextPage}
+}
+
+// SetDefinitionProperties the policy set definition properties.
+type SetDefinitionProperties struct {
+ // PolicyType - The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom. Possible values include: 'NotSpecified', 'BuiltIn', 'Custom'
+ PolicyType Type `json:"policyType,omitempty"`
+ // DisplayName - The display name of the policy set definition.
+ DisplayName *string `json:"displayName,omitempty"`
+ // Description - The policy set definition description.
+ Description *string `json:"description,omitempty"`
+ // Metadata - The policy set definition metadata.
+ Metadata interface{} `json:"metadata,omitempty"`
+ // Parameters - The policy set definition parameters that can be used in policy definition references.
+ Parameters interface{} `json:"parameters,omitempty"`
+ // PolicyDefinitions - An array of policy definition references.
+ PolicyDefinitions *[]DefinitionReference `json:"policyDefinitions,omitempty"`
+}
+
+// Sku the policy sku. This property is optional, obsolete, and will be ignored.
+type Sku struct {
+ // Name - The name of the policy sku. Possible values are A0 and A1.
+ Name *string `json:"name,omitempty"`
+ // Tier - The policy sku tier. Possible values are Free and Standard.
+ Tier *string `json:"tier,omitempty"`
+}
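The list, page, and iterator types above follow the standard autorest paging pattern: a page holds one response and knows how to fetch the next one via NextLink, while an iterator flattens the pages into individual values. A minimal consumption sketch (editor's illustration, not part of this change; the package and function names are assumptions, and it presumes the SetDefinitionsClient added later in this change with its Authorizer already configured):

package policyexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
)

// printSetDefinitionNames walks every policy set definition in the subscription,
// letting the iterator fetch additional pages through NextLink as needed.
func printSetDefinitionNames(ctx context.Context, client policy.SetDefinitionsClient) error {
	it, err := client.ListComplete(ctx)
	if err != nil {
		return err
	}
	for it.NotDone() {
		if name := it.Value().Name; name != nil {
			fmt.Println(*name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}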
diff --git a/services/resources/mgmt/2019-06-01/policy/policyapi/interfaces.go b/services/resources/mgmt/2019-06-01/policy/policyapi/interfaces.go
new file mode 100644
index 000000000000..7169bcdd4700
--- /dev/null
+++ b/services/resources/mgmt/2019-06-01/policy/policyapi/interfaces.go
@@ -0,0 +1,71 @@
+package policyapi
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// AssignmentsClientAPI contains the set of methods on the AssignmentsClient type.
+type AssignmentsClientAPI interface {
+ Create(ctx context.Context, scope string, policyAssignmentName string, parameters policy.Assignment) (result policy.Assignment, err error)
+ CreateByID(ctx context.Context, policyAssignmentID string, parameters policy.Assignment) (result policy.Assignment, err error)
+ Delete(ctx context.Context, scope string, policyAssignmentName string) (result policy.Assignment, err error)
+ DeleteByID(ctx context.Context, policyAssignmentID string) (result policy.Assignment, err error)
+ Get(ctx context.Context, scope string, policyAssignmentName string) (result policy.Assignment, err error)
+ GetByID(ctx context.Context, policyAssignmentID string) (result policy.Assignment, err error)
+ List(ctx context.Context, filter string) (result policy.AssignmentListResultPage, err error)
+ ListForResource(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (result policy.AssignmentListResultPage, err error)
+ ListForResourceGroup(ctx context.Context, resourceGroupName string, filter string) (result policy.AssignmentListResultPage, err error)
+}
+
+var _ AssignmentsClientAPI = (*policy.AssignmentsClient)(nil)
+
+// DefinitionsClientAPI contains the set of methods on the DefinitionsClient type.
+type DefinitionsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, policyDefinitionName string, parameters policy.Definition) (result policy.Definition, err error)
+ CreateOrUpdateAtManagementGroup(ctx context.Context, policyDefinitionName string, parameters policy.Definition, managementGroupID string) (result policy.Definition, err error)
+ Delete(ctx context.Context, policyDefinitionName string) (result autorest.Response, err error)
+ DeleteAtManagementGroup(ctx context.Context, policyDefinitionName string, managementGroupID string) (result autorest.Response, err error)
+ Get(ctx context.Context, policyDefinitionName string) (result policy.Definition, err error)
+ GetAtManagementGroup(ctx context.Context, policyDefinitionName string, managementGroupID string) (result policy.Definition, err error)
+ GetBuiltIn(ctx context.Context, policyDefinitionName string) (result policy.Definition, err error)
+ List(ctx context.Context) (result policy.DefinitionListResultPage, err error)
+ ListBuiltIn(ctx context.Context) (result policy.DefinitionListResultPage, err error)
+ ListByManagementGroup(ctx context.Context, managementGroupID string) (result policy.DefinitionListResultPage, err error)
+}
+
+var _ DefinitionsClientAPI = (*policy.DefinitionsClient)(nil)
+
+// SetDefinitionsClientAPI contains the set of methods on the SetDefinitionsClient type.
+type SetDefinitionsClientAPI interface {
+ CreateOrUpdate(ctx context.Context, policySetDefinitionName string, parameters policy.SetDefinition) (result policy.SetDefinition, err error)
+ CreateOrUpdateAtManagementGroup(ctx context.Context, policySetDefinitionName string, parameters policy.SetDefinition, managementGroupID string) (result policy.SetDefinition, err error)
+ Delete(ctx context.Context, policySetDefinitionName string) (result autorest.Response, err error)
+ DeleteAtManagementGroup(ctx context.Context, policySetDefinitionName string, managementGroupID string) (result autorest.Response, err error)
+ Get(ctx context.Context, policySetDefinitionName string) (result policy.SetDefinition, err error)
+ GetAtManagementGroup(ctx context.Context, policySetDefinitionName string, managementGroupID string) (result policy.SetDefinition, err error)
+ GetBuiltIn(ctx context.Context, policySetDefinitionName string) (result policy.SetDefinition, err error)
+ List(ctx context.Context) (result policy.SetDefinitionListResultPage, err error)
+ ListBuiltIn(ctx context.Context) (result policy.SetDefinitionListResultPage, err error)
+ ListByManagementGroup(ctx context.Context, managementGroupID string) (result policy.SetDefinitionListResultPage, err error)
+}
+
+var _ SetDefinitionsClientAPI = (*policy.SetDefinitionsClient)(nil)
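These policyapi interfaces mirror the exported method sets of the concrete clients, and the compile-time assertions above keep them in sync, so callers can depend on the interface and swap in a fake for unit tests. A hedged sketch of that pattern (editor's illustration, not part of this change; the package name, the function, and the assumption that Assignment embeds *AssignmentProperties like the other generated models are all illustrative):

package policyexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy/policyapi"
)

// ensureAssignment assigns a policy definition at the given scope through any
// AssignmentsClientAPI implementation - policy.AssignmentsClient in production,
// a hand-written fake in tests.
func ensureAssignment(ctx context.Context, c policyapi.AssignmentsClientAPI, scope, name, definitionID string) (policy.Assignment, error) {
	return c.Create(ctx, scope, name, policy.Assignment{
		// Assumption: Assignment embeds *AssignmentProperties, matching the
		// other generated model types in this package.
		AssignmentProperties: &policy.AssignmentProperties{
			PolicyDefinitionID: &definitionID,
		},
	})
}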
diff --git a/services/resources/mgmt/2019-06-01/policy/setdefinitions.go b/services/resources/mgmt/2019-06-01/policy/setdefinitions.go
new file mode 100644
index 000000000000..391db09666db
--- /dev/null
+++ b/services/resources/mgmt/2019-06-01/policy/setdefinitions.go
@@ -0,0 +1,927 @@
+package policy
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// SetDefinitionsClient is the client for managing policy set definitions. To manage and control access to your
+// resources, you can define customized policies and assign them at a scope.
+type SetDefinitionsClient struct {
+ BaseClient
+}
+
+// NewSetDefinitionsClient creates an instance of the SetDefinitionsClient client.
+func NewSetDefinitionsClient(subscriptionID string) SetDefinitionsClient {
+ return NewSetDefinitionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSetDefinitionsClientWithBaseURI creates an instance of the SetDefinitionsClient client.
+func NewSetDefinitionsClientWithBaseURI(baseURI string, subscriptionID string) SetDefinitionsClient {
+ return SetDefinitionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate this operation creates or updates a policy set definition in the given subscription with the given
+// name.
+// Parameters:
+// policySetDefinitionName - the name of the policy set definition to create.
+// parameters - the policy set definition properties.
+func (client SetDefinitionsClient) CreateOrUpdate(ctx context.Context, policySetDefinitionName string, parameters SetDefinition) (result SetDefinition, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.SetDefinitionProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.SetDefinitionProperties.PolicyDefinitions", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("policy.SetDefinitionsClient", "CreateOrUpdate", err.Error())
+ }
+
+ req, err := client.CreateOrUpdatePreparer(ctx, policySetDefinitionName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client SetDefinitionsClient) CreateOrUpdatePreparer(ctx context.Context, policySetDefinitionName string, parameters SetDefinition) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policySetDefinitionName": autorest.Encode("path", policySetDefinitionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ parameters.ID = nil
+ parameters.Name = nil
+ parameters.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client SetDefinitionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client SetDefinitionsClient) CreateOrUpdateResponder(resp *http.Response) (result SetDefinition, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
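// Editor's sketch, not part of the generated file: an illustration of how the
// CreateOrUpdate operation above combines with the SetDefinitionProperties and
// DefinitionReference models. The package and function names are assumptions.
package policyexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
	"github.com/Azure/go-autorest/autorest/to"
)

// createInitiative groups existing policy definitions into one policy set
// definition. Note the client-side validation in CreateOrUpdate above:
// SetDefinitionProperties.PolicyDefinitions is required.
func createInitiative(ctx context.Context, client policy.SetDefinitionsClient, name string, definitionIDs []string) (policy.SetDefinition, error) {
	refs := make([]policy.DefinitionReference, 0, len(definitionIDs))
	for _, id := range definitionIDs {
		refs = append(refs, policy.DefinitionReference{PolicyDefinitionID: to.StringPtr(id)})
	}
	return client.CreateOrUpdate(ctx, name, policy.SetDefinition{
		SetDefinitionProperties: &policy.SetDefinitionProperties{
			DisplayName:       to.StringPtr(name),
			PolicyDefinitions: &refs,
		},
	})
}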
+// CreateOrUpdateAtManagementGroup this operation creates or updates a policy set definition in the given management
+// group with the given name.
+// Parameters:
+// policySetDefinitionName - the name of the policy set definition to create.
+// parameters - the policy set definition properties.
+// managementGroupID - the ID of the management group.
+func (client SetDefinitionsClient) CreateOrUpdateAtManagementGroup(ctx context.Context, policySetDefinitionName string, parameters SetDefinition, managementGroupID string) (result SetDefinition, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.CreateOrUpdateAtManagementGroup")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: parameters,
+ Constraints: []validation.Constraint{{Target: "parameters.SetDefinitionProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "parameters.SetDefinitionProperties.PolicyDefinitions", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
+ return result, validation.NewError("policy.SetDefinitionsClient", "CreateOrUpdateAtManagementGroup", err.Error())
+ }
+
+ req, err := client.CreateOrUpdateAtManagementGroupPreparer(ctx, policySetDefinitionName, parameters, managementGroupID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "CreateOrUpdateAtManagementGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateAtManagementGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "CreateOrUpdateAtManagementGroup", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateAtManagementGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "CreateOrUpdateAtManagementGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdateAtManagementGroupPreparer prepares the CreateOrUpdateAtManagementGroup request.
+func (client SetDefinitionsClient) CreateOrUpdateAtManagementGroupPreparer(ctx context.Context, policySetDefinitionName string, parameters SetDefinition, managementGroupID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managementGroupId": autorest.Encode("path", managementGroupID),
+ "policySetDefinitionName": autorest.Encode("path", policySetDefinitionName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ parameters.ID = nil
+ parameters.Name = nil
+ parameters.Type = nil
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateAtManagementGroupSender sends the CreateOrUpdateAtManagementGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client SetDefinitionsClient) CreateOrUpdateAtManagementGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateOrUpdateAtManagementGroupResponder handles the response to the CreateOrUpdateAtManagementGroup request. The method always
+// closes the http.Response Body.
+func (client SetDefinitionsClient) CreateOrUpdateAtManagementGroupResponder(resp *http.Response) (result SetDefinition, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete this operation deletes the policy set definition in the given subscription with the given name.
+// Parameters:
+// policySetDefinitionName - the name of the policy set definition to delete.
+func (client SetDefinitionsClient) Delete(ctx context.Context, policySetDefinitionName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeletePreparer(ctx, policySetDefinitionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client SetDefinitionsClient) DeletePreparer(ctx context.Context, policySetDefinitionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policySetDefinitionName": autorest.Encode("path", policySetDefinitionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client SetDefinitionsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client SetDefinitionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// DeleteAtManagementGroup this operation deletes the policy set definition in the given management group with the
+// given name.
+// Parameters:
+// policySetDefinitionName - the name of the policy set definition to delete.
+// managementGroupID - the ID of the management group.
+func (client SetDefinitionsClient) DeleteAtManagementGroup(ctx context.Context, policySetDefinitionName string, managementGroupID string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.DeleteAtManagementGroup")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.DeleteAtManagementGroupPreparer(ctx, policySetDefinitionName, managementGroupID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "DeleteAtManagementGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteAtManagementGroupSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "DeleteAtManagementGroup", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteAtManagementGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "DeleteAtManagementGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeleteAtManagementGroupPreparer prepares the DeleteAtManagementGroup request.
+func (client SetDefinitionsClient) DeleteAtManagementGroupPreparer(ctx context.Context, policySetDefinitionName string, managementGroupID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managementGroupId": autorest.Encode("path", managementGroupID),
+ "policySetDefinitionName": autorest.Encode("path", policySetDefinitionName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteAtManagementGroupSender sends the DeleteAtManagementGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client SetDefinitionsClient) DeleteAtManagementGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteAtManagementGroupResponder handles the response to the DeleteAtManagementGroup request. The method always
+// closes the http.Response Body.
+func (client SetDefinitionsClient) DeleteAtManagementGroupResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get this operation retrieves the policy set definition in the given subscription with the given name.
+// Parameters:
+// policySetDefinitionName - the name of the policy set definition to get.
+func (client SetDefinitionsClient) Get(ctx context.Context, policySetDefinitionName string) (result SetDefinition, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetPreparer(ctx, policySetDefinitionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client SetDefinitionsClient) GetPreparer(ctx context.Context, policySetDefinitionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policySetDefinitionName": autorest.Encode("path", policySetDefinitionName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client SetDefinitionsClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client SetDefinitionsClient) GetResponder(resp *http.Response) (result SetDefinition, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetAtManagementGroup this operation retrieves the policy set definition in the given management group with the given
+// name.
+// Parameters:
+// policySetDefinitionName - the name of the policy set definition to get.
+// managementGroupID - the ID of the management group.
+func (client SetDefinitionsClient) GetAtManagementGroup(ctx context.Context, policySetDefinitionName string, managementGroupID string) (result SetDefinition, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.GetAtManagementGroup")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetAtManagementGroupPreparer(ctx, policySetDefinitionName, managementGroupID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "GetAtManagementGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetAtManagementGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "GetAtManagementGroup", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetAtManagementGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "GetAtManagementGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetAtManagementGroupPreparer prepares the GetAtManagementGroup request.
+func (client SetDefinitionsClient) GetAtManagementGroupPreparer(ctx context.Context, policySetDefinitionName string, managementGroupID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managementGroupId": autorest.Encode("path", managementGroupID),
+ "policySetDefinitionName": autorest.Encode("path", policySetDefinitionName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetAtManagementGroupSender sends the GetAtManagementGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client SetDefinitionsClient) GetAtManagementGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAtManagementGroupResponder handles the response to the GetAtManagementGroup request. The method always
+// closes the http.Response Body.
+func (client SetDefinitionsClient) GetAtManagementGroupResponder(resp *http.Response) (result SetDefinition, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// GetBuiltIn this operation retrieves the built-in policy set definition with the given name.
+// Parameters:
+// policySetDefinitionName - the name of the policy set definition to get.
+func (client SetDefinitionsClient) GetBuiltIn(ctx context.Context, policySetDefinitionName string) (result SetDefinition, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.GetBuiltIn")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ req, err := client.GetBuiltInPreparer(ctx, policySetDefinitionName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "GetBuiltIn", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetBuiltInSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "GetBuiltIn", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetBuiltInResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "GetBuiltIn", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetBuiltInPreparer prepares the GetBuiltIn request.
+func (client SetDefinitionsClient) GetBuiltInPreparer(ctx context.Context, policySetDefinitionName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "policySetDefinitionName": autorest.Encode("path", policySetDefinitionName),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Authorization/policySetDefinitions/{policySetDefinitionName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetBuiltInSender sends the GetBuiltIn request. The method will close the
+// http.Response Body if it receives an error.
+func (client SetDefinitionsClient) GetBuiltInSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetBuiltInResponder handles the response to the GetBuiltIn request. The method always
+// closes the http.Response Body.
+func (client SetDefinitionsClient) GetBuiltInResponder(resp *http.Response) (result SetDefinition, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List this operation retrieves a list of all the policy set definitions in the given subscription.
+func (client SetDefinitionsClient) List(ctx context.Context) (result SetDefinitionListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.List")
+ defer func() {
+ sc := -1
+ if result.sdlr.Response.Response != nil {
+ sc = result.sdlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.sdlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.sdlr, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client SetDefinitionsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policySetDefinitions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client SetDefinitionsClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client SetDefinitionsClient) ListResponder(resp *http.Response) (result SetDefinitionListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client SetDefinitionsClient) listNextResults(ctx context.Context, lastResults SetDefinitionListResult) (result SetDefinitionListResult, err error) {
+ req, err := lastResults.setDefinitionListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client SetDefinitionsClient) ListComplete(ctx context.Context) (result SetDefinitionListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx)
+ return
+}
+
+// ListBuiltIn this operation retrieves a list of all the built-in policy set definitions.
+func (client SetDefinitionsClient) ListBuiltIn(ctx context.Context) (result SetDefinitionListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.ListBuiltIn")
+ defer func() {
+ sc := -1
+ if result.sdlr.Response.Response != nil {
+ sc = result.sdlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listBuiltInNextResults
+ req, err := client.ListBuiltInPreparer(ctx)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "ListBuiltIn", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListBuiltInSender(req)
+ if err != nil {
+ result.sdlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "ListBuiltIn", resp, "Failure sending request")
+ return
+ }
+
+ result.sdlr, err = client.ListBuiltInResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "ListBuiltIn", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListBuiltInPreparer prepares the ListBuiltIn request.
+func (client SetDefinitionsClient) ListBuiltInPreparer(ctx context.Context) (*http.Request, error) {
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/providers/Microsoft.Authorization/policySetDefinitions"),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListBuiltInSender sends the ListBuiltIn request. The method will close the
+// http.Response Body if it receives an error.
+func (client SetDefinitionsClient) ListBuiltInSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListBuiltInResponder handles the response to the ListBuiltIn request. The method always
+// closes the http.Response Body.
+func (client SetDefinitionsClient) ListBuiltInResponder(resp *http.Response) (result SetDefinitionListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listBuiltInNextResults retrieves the next set of results, if any.
+func (client SetDefinitionsClient) listBuiltInNextResults(ctx context.Context, lastResults SetDefinitionListResult) (result SetDefinitionListResult, err error) {
+ req, err := lastResults.setDefinitionListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "listBuiltInNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListBuiltInSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "listBuiltInNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListBuiltInResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "listBuiltInNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListBuiltInComplete enumerates all values, automatically crossing page boundaries as required.
+func (client SetDefinitionsClient) ListBuiltInComplete(ctx context.Context) (result SetDefinitionListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.ListBuiltIn")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListBuiltIn(ctx)
+ return
+}
+
+// ListByManagementGroup this operation retrieves a list of all the policy set definitions in the given management
+// group.
+// Parameters:
+// managementGroupID - the ID of the management group.
+func (client SetDefinitionsClient) ListByManagementGroup(ctx context.Context, managementGroupID string) (result SetDefinitionListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.ListByManagementGroup")
+ defer func() {
+ sc := -1
+ if result.sdlr.Response.Response != nil {
+ sc = result.sdlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.fn = client.listByManagementGroupNextResults
+ req, err := client.ListByManagementGroupPreparer(ctx, managementGroupID)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "ListByManagementGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByManagementGroupSender(req)
+ if err != nil {
+ result.sdlr.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "ListByManagementGroup", resp, "Failure sending request")
+ return
+ }
+
+ result.sdlr, err = client.ListByManagementGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "ListByManagementGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByManagementGroupPreparer prepares the ListByManagementGroup request.
+func (client SetDefinitionsClient) ListByManagementGroupPreparer(ctx context.Context, managementGroupID string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "managementGroupId": autorest.Encode("path", managementGroupID),
+ }
+
+ const APIVersion = "2019-06-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policySetDefinitions", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByManagementGroupSender sends the ListByManagementGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client SetDefinitionsClient) ListByManagementGroupSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByManagementGroupResponder handles the response to the ListByManagementGroup request. The method always
+// closes the http.Response Body.
+func (client SetDefinitionsClient) ListByManagementGroupResponder(resp *http.Response) (result SetDefinitionListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listByManagementGroupNextResults retrieves the next set of results, if any.
+func (client SetDefinitionsClient) listByManagementGroupNextResults(ctx context.Context, lastResults SetDefinitionListResult) (result SetDefinitionListResult, err error) {
+ req, err := lastResults.setDefinitionListResultPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "listByManagementGroupNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListByManagementGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "listByManagementGroupNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListByManagementGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "policy.SetDefinitionsClient", "listByManagementGroupNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListByManagementGroupComplete enumerates all values, automatically crossing page boundaries as required.
+func (client SetDefinitionsClient) ListByManagementGroupComplete(ctx context.Context, managementGroupID string) (result SetDefinitionListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SetDefinitionsClient.ListByManagementGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.ListByManagementGroup(ctx, managementGroupID)
+ return
+}
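
The regenerated client above pairs each pager (List, ListBuiltIn, ListByManagementGroup) with a *Complete variant that returns an iterator and crosses page boundaries for the caller. As a rough illustration only, the sketch below assumes the usual AutoRest surface for this package — a NewSetDefinitionsClient constructor, an environment-based authorizer from go-autorest's auth helper, and the NotDone/Value/NextWithContext iterator methods — and uses a placeholder subscription ID.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholder subscription ID; replace with a real one.
	subscriptionID := "00000000-0000-0000-0000-000000000000"

	// Assumes credentials are available via the usual environment variables.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	client := policy.NewSetDefinitionsClient(subscriptionID)
	client.Authorizer = authorizer

	ctx := context.Background()
	// ListBuiltInComplete returns an iterator that fetches further pages as needed.
	it, err := client.ListBuiltInComplete(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for it.NotDone() {
		def := it.Value()
		if def.Name != nil {
			fmt.Println(*def.Name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```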
diff --git a/services/resources/mgmt/2019-06-01/policy/version.go b/services/resources/mgmt/2019-06-01/policy/version.go
new file mode 100644
index 000000000000..10032e3ed295
--- /dev/null
+++ b/services/resources/mgmt/2019-06-01/policy/version.go
@@ -0,0 +1,30 @@
+package policy
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/" + version.Number + " policy/2019-06-01"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return version.Number
+}
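
A change that recurs throughout this release: the regenerated *Sender methods now resolve their send decorators from the request context via autorest.GetSendDecorators, falling back to the defaults shown in the diffs. Assuming go-autorest v13's companion WithSendDecorators helper, a caller could override the retry behaviour for a single call roughly as sketched here; the retry count and backoff are illustrative only.

```go
package main

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/policy"
	"github.com/Azure/go-autorest/autorest"
)

// withCustomRetry attaches per-call send decorators to the context so that the
// generated *Sender methods pick them up instead of their built-in defaults.
// (Assumes autorest.WithSendDecorators as introduced alongside GetSendDecorators.)
func withCustomRetry(ctx context.Context) context.Context {
	return autorest.WithSendDecorators(ctx, []autorest.SendDecorator{
		autorest.DoRetryForStatusCodes(5, 2*time.Second, autorest.StatusCodesForRetry...),
	})
}

// listWithCustomRetry runs ListBuiltInComplete with the custom retry policy above.
func listWithCustomRetry(client policy.SetDefinitionsClient) error {
	_, err := client.ListBuiltInComplete(withCustomRetry(context.Background()))
	return err
}
```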
diff --git a/services/servicefabric/6.2/servicefabric/models.go b/services/servicefabric/6.2/servicefabric/models.go
index 2df3cf632b4d..6a8ea132b28c 100644
--- a/services/servicefabric/6.2/servicefabric/models.go
+++ b/services/servicefabric/6.2/servicefabric/models.go
@@ -46509,7 +46509,7 @@ func (sppd ServicePlacementPolicyDescription) AsBasicServicePlacementPolicyDescr
// domain.
//
// This placement policy is usually used with fault domains in scenarios where the Service Fabric cluster
-// is geographically distributed in order to indicate that a service�s primary replica should be located in
+// is geographically distributed in order to indicate that a service's primary replica should be located in
// a particular fault domain, which in geo-distributed scenarios usually aligns with regional or datacenter
// boundaries. Note that since this is an optimization it is possible that the Primary replica may not end
// up located in this domain due to failures, capacity limits, or other constraints.
diff --git a/services/servicefabric/mgmt/2019-03-01/servicefabric/applications.go b/services/servicefabric/mgmt/2019-03-01/servicefabric/applications.go
index 09430d97a34e..069a1a741ce3 100644
--- a/services/servicefabric/mgmt/2019-03-01/servicefabric/applications.go
+++ b/services/servicefabric/mgmt/2019-03-01/servicefabric/applications.go
@@ -40,15 +40,15 @@ func NewApplicationsClientWithBaseURI(baseURI string, subscriptionID string) App
return ApplicationsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
-// Create create or update a Service Fabric application resource with the specified name.
+// CreateOrUpdate create or update a Service Fabric application resource with the specified name.
// Parameters:
// resourceGroupName - the name of the resource group.
// clusterName - the name of the cluster resource.
// applicationName - the name of the application resource.
// parameters - the application resource.
-func (client ApplicationsClient) Create(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters ApplicationResource) (result ApplicationsCreateFuture, err error) {
+func (client ApplicationsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters ApplicationResource) (result ApplicationsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationsClient.Create")
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response() != nil {
@@ -57,23 +57,23 @@ func (client ApplicationsClient) Create(ctx context.Context, resourceGroupName s
tracing.EndSpan(ctx, sc, err)
}()
}
- req, err := client.CreatePreparer(ctx, resourceGroupName, clusterName, applicationName, parameters)
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, clusterName, applicationName, parameters)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "Create", nil, "Failure preparing request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
- result, err = client.CreateSender(req)
+ result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "Create", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
-// CreatePreparer prepares the Create request.
-func (client ApplicationsClient) CreatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters ApplicationResource) (*http.Request, error) {
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ApplicationsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters ApplicationResource) (*http.Request, error) {
pathParameters := map[string]interface{}{
"applicationName": autorest.Encode("path", applicationName),
"clusterName": autorest.Encode("path", clusterName),
@@ -81,7 +81,7 @@ func (client ApplicationsClient) CreatePreparer(ctx context.Context, resourceGro
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -96,12 +96,12 @@ func (client ApplicationsClient) CreatePreparer(ctx context.Context, resourceGro
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
-// CreateSender sends the Create request. The method will close the
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
-func (client ApplicationsClient) CreateSender(req *http.Request) (future ApplicationsCreateFuture, err error) {
+func (client ApplicationsClient) CreateOrUpdateSender(req *http.Request) (future ApplicationsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
- resp, err = autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ resp, err = autorest.SendWithSender(client, req, sd...)
if err != nil {
return
}
@@ -109,9 +109,9 @@ func (client ApplicationsClient) CreateSender(req *http.Request) (future Applica
return
}
-// CreateResponder handles the response to the Create request. The method always
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
-func (client ApplicationsClient) CreateResponder(resp *http.Response) (result ApplicationResource, err error) {
+func (client ApplicationsClient) CreateOrUpdateResponder(resp *http.Response) (result ApplicationResource, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
@@ -162,7 +162,7 @@ func (client ApplicationsClient) DeletePreparer(ctx context.Context, resourceGro
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -178,9 +178,9 @@ func (client ApplicationsClient) DeletePreparer(ctx context.Context, resourceGro
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) DeleteSender(req *http.Request) (future ApplicationsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
- resp, err = autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ resp, err = autorest.SendWithSender(client, req, sd...)
if err != nil {
return
}
@@ -247,7 +247,7 @@ func (client ApplicationsClient) GetPreparer(ctx context.Context, resourceGroupN
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -263,8 +263,8 @@ func (client ApplicationsClient) GetPreparer(ctx context.Context, resourceGroupN
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) GetSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// GetResponder handles the response to the Get request. The method always
@@ -325,7 +325,7 @@ func (client ApplicationsClient) ListPreparer(ctx context.Context, resourceGroup
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -341,8 +341,8 @@ func (client ApplicationsClient) ListPreparer(ctx context.Context, resourceGroup
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) ListSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// ListResponder handles the response to the List request. The method always
@@ -399,7 +399,7 @@ func (client ApplicationsClient) UpdatePreparer(ctx context.Context, resourceGro
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -417,9 +417,9 @@ func (client ApplicationsClient) UpdatePreparer(ctx context.Context, resourceGro
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) UpdateSender(req *http.Request) (future ApplicationsUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
- resp, err = autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ resp, err = autorest.SendWithSender(client, req, sd...)
if err != nil {
return
}
@@ -429,7 +429,7 @@ func (client ApplicationsClient) UpdateSender(req *http.Request) (future Applica
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
-func (client ApplicationsClient) UpdateResponder(resp *http.Response) (result ApplicationResourceUpdate, err error) {
+func (client ApplicationsClient) UpdateResponder(resp *http.Response) (result ApplicationResource, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
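
The Create family on ApplicationsClient (and, below, ApplicationTypesClient, ApplicationTypeVersionsClient, and ClustersClient) is renamed to CreateOrUpdate, and the long-running variants now return *CreateOrUpdateFuture types. A minimal migration sketch, assuming the standard generated future pattern (WaitForCompletionRef followed by Result) and with hypothetical argument names:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric"
	"github.com/Azure/go-autorest/autorest"
)

// createOrUpdateApplication shows the post-rename call pattern: CreateOrUpdate
// returns a future that is waited on before the resource is read back.
func createOrUpdateApplication(ctx context.Context, authorizer autorest.Authorizer,
	subscriptionID, resourceGroup, clusterName, appName string,
	params servicefabric.ApplicationResource) error {

	client := servicefabric.NewApplicationsClient(subscriptionID)
	client.Authorizer = authorizer

	future, err := client.CreateOrUpdate(ctx, resourceGroup, clusterName, appName, params)
	if err != nil {
		return err
	}
	// Block until the long-running operation completes.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	app, err := future.Result(client)
	if err != nil {
		return err
	}
	if app.ID != nil {
		fmt.Println(*app.ID)
	}
	return nil
}
```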
diff --git a/services/servicefabric/mgmt/2019-03-01/servicefabric/applicationtypes.go b/services/servicefabric/mgmt/2019-03-01/servicefabric/applicationtypes.go
index 34ff06ec7b35..ab95ffc76e99 100644
--- a/services/servicefabric/mgmt/2019-03-01/servicefabric/applicationtypes.go
+++ b/services/servicefabric/mgmt/2019-03-01/servicefabric/applicationtypes.go
@@ -40,15 +40,15 @@ func NewApplicationTypesClientWithBaseURI(baseURI string, subscriptionID string)
return ApplicationTypesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
-// Create create or update a Service Fabric application type name resource with the specified name.
+// CreateOrUpdate create or update a Service Fabric application type name resource with the specified name.
// Parameters:
// resourceGroupName - the name of the resource group.
// clusterName - the name of the cluster resource.
// applicationTypeName - the name of the application type name resource.
// parameters - the application type name resource.
-func (client ApplicationTypesClient) Create(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, parameters ApplicationTypeResource) (result ApplicationTypeResource, err error) {
+func (client ApplicationTypesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, parameters ApplicationTypeResource) (result ApplicationTypeResource, err error) {
if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypesClient.Create")
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypesClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response.Response != nil {
@@ -57,29 +57,29 @@ func (client ApplicationTypesClient) Create(ctx context.Context, resourceGroupNa
tracing.EndSpan(ctx, sc, err)
}()
}
- req, err := client.CreatePreparer(ctx, resourceGroupName, clusterName, applicationTypeName, parameters)
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, clusterName, applicationTypeName, parameters)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "Create", nil, "Failure preparing request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
- resp, err := client.CreateSender(req)
+ resp, err := client.CreateOrUpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "Create", resp, "Failure sending request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "CreateOrUpdate", resp, "Failure sending request")
return
}
- result, err = client.CreateResponder(resp)
+ result, err = client.CreateOrUpdateResponder(resp)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "Create", resp, "Failure responding to request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypesClient", "CreateOrUpdate", resp, "Failure responding to request")
}
return
}
-// CreatePreparer prepares the Create request.
-func (client ApplicationTypesClient) CreatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, parameters ApplicationTypeResource) (*http.Request, error) {
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ApplicationTypesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, parameters ApplicationTypeResource) (*http.Request, error) {
pathParameters := map[string]interface{}{
"applicationTypeName": autorest.Encode("path", applicationTypeName),
"clusterName": autorest.Encode("path", clusterName),
@@ -87,7 +87,7 @@ func (client ApplicationTypesClient) CreatePreparer(ctx context.Context, resourc
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -102,16 +102,16 @@ func (client ApplicationTypesClient) CreatePreparer(ctx context.Context, resourc
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
-// CreateSender sends the Create request. The method will close the
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
-func (client ApplicationTypesClient) CreateSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+func (client ApplicationTypesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
-// CreateResponder handles the response to the Create request. The method always
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
-func (client ApplicationTypesClient) CreateResponder(resp *http.Response) (result ApplicationTypeResource, err error) {
+func (client ApplicationTypesClient) CreateOrUpdateResponder(resp *http.Response) (result ApplicationTypeResource, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
@@ -162,7 +162,7 @@ func (client ApplicationTypesClient) DeletePreparer(ctx context.Context, resourc
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -178,9 +178,9 @@ func (client ApplicationTypesClient) DeletePreparer(ctx context.Context, resourc
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationTypesClient) DeleteSender(req *http.Request) (future ApplicationTypesDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
- resp, err = autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ resp, err = autorest.SendWithSender(client, req, sd...)
if err != nil {
return
}
@@ -247,7 +247,7 @@ func (client ApplicationTypesClient) GetPreparer(ctx context.Context, resourceGr
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -263,8 +263,8 @@ func (client ApplicationTypesClient) GetPreparer(ctx context.Context, resourceGr
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationTypesClient) GetSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// GetResponder handles the response to the Get request. The method always
@@ -325,7 +325,7 @@ func (client ApplicationTypesClient) ListPreparer(ctx context.Context, resourceG
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -341,8 +341,8 @@ func (client ApplicationTypesClient) ListPreparer(ctx context.Context, resourceG
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationTypesClient) ListSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// ListResponder handles the response to the List request. The method always
diff --git a/services/servicefabric/mgmt/2019-03-01/servicefabric/applicationtypeversions.go b/services/servicefabric/mgmt/2019-03-01/servicefabric/applicationtypeversions.go
index cbb637dcf33e..5764aa455349 100644
--- a/services/servicefabric/mgmt/2019-03-01/servicefabric/applicationtypeversions.go
+++ b/services/servicefabric/mgmt/2019-03-01/servicefabric/applicationtypeversions.go
@@ -41,16 +41,16 @@ func NewApplicationTypeVersionsClientWithBaseURI(baseURI string, subscriptionID
return ApplicationTypeVersionsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
-// Create create or update a Service Fabric application type version resource with the specified name.
+// CreateOrUpdate create or update a Service Fabric application type version resource with the specified name.
// Parameters:
// resourceGroupName - the name of the resource group.
// clusterName - the name of the cluster resource.
// applicationTypeName - the name of the application type name resource.
// version - the application type version.
// parameters - the application type version resource.
-func (client ApplicationTypeVersionsClient) Create(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, parameters ApplicationTypeVersionResource) (result ApplicationTypeVersionsCreateFuture, err error) {
+func (client ApplicationTypeVersionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, parameters ApplicationTypeVersionResource) (result ApplicationTypeVersionsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypeVersionsClient.Create")
+ ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationTypeVersionsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response() != nil {
@@ -63,26 +63,26 @@ func (client ApplicationTypeVersionsClient) Create(ctx context.Context, resource
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.ApplicationTypeVersionResourceProperties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.ApplicationTypeVersionResourceProperties.AppPackageURL", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
- return result, validation.NewError("servicefabric.ApplicationTypeVersionsClient", "Create", err.Error())
+ return result, validation.NewError("servicefabric.ApplicationTypeVersionsClient", "CreateOrUpdate", err.Error())
}
- req, err := client.CreatePreparer(ctx, resourceGroupName, clusterName, applicationTypeName, version, parameters)
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, clusterName, applicationTypeName, version, parameters)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "Create", nil, "Failure preparing request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
- result, err = client.CreateSender(req)
+ result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "Create", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
-// CreatePreparer prepares the Create request.
-func (client ApplicationTypeVersionsClient) CreatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, parameters ApplicationTypeVersionResource) (*http.Request, error) {
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ApplicationTypeVersionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, parameters ApplicationTypeVersionResource) (*http.Request, error) {
pathParameters := map[string]interface{}{
"applicationTypeName": autorest.Encode("path", applicationTypeName),
"clusterName": autorest.Encode("path", clusterName),
@@ -91,7 +91,7 @@ func (client ApplicationTypeVersionsClient) CreatePreparer(ctx context.Context,
"version": autorest.Encode("path", version),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -106,12 +106,12 @@ func (client ApplicationTypeVersionsClient) CreatePreparer(ctx context.Context,
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
-// CreateSender sends the Create request. The method will close the
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
-func (client ApplicationTypeVersionsClient) CreateSender(req *http.Request) (future ApplicationTypeVersionsCreateFuture, err error) {
+func (client ApplicationTypeVersionsClient) CreateOrUpdateSender(req *http.Request) (future ApplicationTypeVersionsCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
- resp, err = autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ resp, err = autorest.SendWithSender(client, req, sd...)
if err != nil {
return
}
@@ -119,9 +119,9 @@ func (client ApplicationTypeVersionsClient) CreateSender(req *http.Request) (fut
return
}
-// CreateResponder handles the response to the Create request. The method always
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
-func (client ApplicationTypeVersionsClient) CreateResponder(resp *http.Response) (result ApplicationTypeVersionResource, err error) {
+func (client ApplicationTypeVersionsClient) CreateOrUpdateResponder(resp *http.Response) (result ApplicationTypeVersionResource, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
@@ -174,7 +174,7 @@ func (client ApplicationTypeVersionsClient) DeletePreparer(ctx context.Context,
"version": autorest.Encode("path", version),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -190,9 +190,9 @@ func (client ApplicationTypeVersionsClient) DeletePreparer(ctx context.Context,
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationTypeVersionsClient) DeleteSender(req *http.Request) (future ApplicationTypeVersionsDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
- resp, err = autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ resp, err = autorest.SendWithSender(client, req, sd...)
if err != nil {
return
}
@@ -261,7 +261,7 @@ func (client ApplicationTypeVersionsClient) GetPreparer(ctx context.Context, res
"version": autorest.Encode("path", version),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -277,8 +277,8 @@ func (client ApplicationTypeVersionsClient) GetPreparer(ctx context.Context, res
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationTypeVersionsClient) GetSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// GetResponder handles the response to the Get request. The method always
@@ -341,7 +341,7 @@ func (client ApplicationTypeVersionsClient) ListPreparer(ctx context.Context, re
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -357,8 +357,8 @@ func (client ApplicationTypeVersionsClient) ListPreparer(ctx context.Context, re
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationTypeVersionsClient) ListSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// ListResponder handles the response to the List request. The method always
diff --git a/services/servicefabric/mgmt/2019-03-01/servicefabric/client.go b/services/servicefabric/mgmt/2019-03-01/servicefabric/client.go
index 5d0f53e073fc..c1ab1f71bffb 100644
--- a/services/servicefabric/mgmt/2019-03-01/servicefabric/client.go
+++ b/services/servicefabric/mgmt/2019-03-01/servicefabric/client.go
@@ -1,4 +1,4 @@
-// Package servicefabric implements the Azure ARM Servicefabric service API version .
+// Package servicefabric implements the Azure ARM Servicefabric service API version 2019-03-01.
//
// Service Fabric Management Client
package servicefabric
diff --git a/services/servicefabric/mgmt/2019-03-01/servicefabric/clusters.go b/services/servicefabric/mgmt/2019-03-01/servicefabric/clusters.go
index 3cbbb86265ed..bf067d540602 100644
--- a/services/servicefabric/mgmt/2019-03-01/servicefabric/clusters.go
+++ b/services/servicefabric/mgmt/2019-03-01/servicefabric/clusters.go
@@ -41,14 +41,14 @@ func NewClustersClientWithBaseURI(baseURI string, subscriptionID string) Cluster
return ClustersClient{NewWithBaseURI(baseURI, subscriptionID)}
}
-// Create create or update a Service Fabric cluster resource with the specified name.
+// CreateOrUpdate create or update a Service Fabric cluster resource with the specified name.
// Parameters:
// resourceGroupName - the name of the resource group.
// clusterName - the name of the cluster resource.
// parameters - the cluster resource.
-func (client ClustersClient) Create(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster) (result ClustersCreateFuture, err error) {
+func (client ClustersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster) (result ClustersCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ClustersClient.Create")
+ ctx = tracing.StartSpan(ctx, fqdn+"/ClustersClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response() != nil {
@@ -106,33 +106,33 @@ func (client ClustersClient) Create(ctx context.Context, resourceGroupName strin
}},
}},
}}}}}); err != nil {
- return result, validation.NewError("servicefabric.ClustersClient", "Create", err.Error())
+ return result, validation.NewError("servicefabric.ClustersClient", "CreateOrUpdate", err.Error())
}
- req, err := client.CreatePreparer(ctx, resourceGroupName, clusterName, parameters)
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, clusterName, parameters)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Create", nil, "Failure preparing request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
- result, err = client.CreateSender(req)
+ result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "Create", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
-// CreatePreparer prepares the Create request.
-func (client ClustersClient) CreatePreparer(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster) (*http.Request, error) {
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ClustersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster) (*http.Request, error) {
pathParameters := map[string]interface{}{
"clusterName": autorest.Encode("path", clusterName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-02-01"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -147,12 +147,12 @@ func (client ClustersClient) CreatePreparer(ctx context.Context, resourceGroupNa
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
-// CreateSender sends the Create request. The method will close the
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
-func (client ClustersClient) CreateSender(req *http.Request) (future ClustersCreateFuture, err error) {
+func (client ClustersClient) CreateOrUpdateSender(req *http.Request) (future ClustersCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
- resp, err = autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ resp, err = autorest.SendWithSender(client, req, sd...)
if err != nil {
return
}
@@ -160,9 +160,9 @@ func (client ClustersClient) CreateSender(req *http.Request) (future ClustersCre
return
}
-// CreateResponder handles the response to the Create request. The method always
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
-func (client ClustersClient) CreateResponder(resp *http.Response) (result Cluster, err error) {
+func (client ClustersClient) CreateOrUpdateResponder(resp *http.Response) (result Cluster, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
@@ -217,7 +217,7 @@ func (client ClustersClient) DeletePreparer(ctx context.Context, resourceGroupNa
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-02-01"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -233,8 +233,8 @@ func (client ClustersClient) DeletePreparer(ctx context.Context, resourceGroupNa
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ClustersClient) DeleteSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// DeleteResponder handles the response to the Delete request. The method always
@@ -294,7 +294,7 @@ func (client ClustersClient) GetPreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-02-01"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -310,8 +310,8 @@ func (client ClustersClient) GetPreparer(ctx context.Context, resourceGroupName
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ClustersClient) GetSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// GetResponder handles the response to the Get request. The method always
@@ -366,7 +366,7 @@ func (client ClustersClient) ListPreparer(ctx context.Context) (*http.Request, e
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-02-01"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -382,8 +382,8 @@ func (client ClustersClient) ListPreparer(ctx context.Context) (*http.Request, e
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ClustersClient) ListSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// ListResponder handles the response to the List request. The method always
@@ -442,7 +442,7 @@ func (client ClustersClient) ListByResourceGroupPreparer(ctx context.Context, re
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-02-01"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -458,8 +458,8 @@ func (client ClustersClient) ListByResourceGroupPreparer(ctx context.Context, re
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client ClustersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
@@ -515,7 +515,7 @@ func (client ClustersClient) UpdatePreparer(ctx context.Context, resourceGroupNa
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-02-01"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -533,9 +533,9 @@ func (client ClustersClient) UpdatePreparer(ctx context.Context, resourceGroupNa
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client ClustersClient) UpdateSender(req *http.Request) (future ClustersUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
- resp, err = autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ resp, err = autorest.SendWithSender(client, req, sd...)
if err != nil {
return
}
diff --git a/services/servicefabric/mgmt/2019-03-01/servicefabric/clusterversions.go b/services/servicefabric/mgmt/2019-03-01/servicefabric/clusterversions.go
index 9702715a5ccf..cc740eb6e894 100644
--- a/services/servicefabric/mgmt/2019-03-01/servicefabric/clusterversions.go
+++ b/services/servicefabric/mgmt/2019-03-01/servicefabric/clusterversions.go
@@ -84,7 +84,7 @@ func (client ClusterVersionsClient) GetPreparer(ctx context.Context, location st
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-02-01"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -100,8 +100,8 @@ func (client ClusterVersionsClient) GetPreparer(ctx context.Context, location st
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ClusterVersionsClient) GetSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// GetResponder handles the response to the Get request. The method always
@@ -163,7 +163,7 @@ func (client ClusterVersionsClient) GetByEnvironmentPreparer(ctx context.Context
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-02-01"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -179,8 +179,8 @@ func (client ClusterVersionsClient) GetByEnvironmentPreparer(ctx context.Context
// GetByEnvironmentSender sends the GetByEnvironment request. The method will close the
// http.Response Body if it receives an error.
func (client ClusterVersionsClient) GetByEnvironmentSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// GetByEnvironmentResponder handles the response to the GetByEnvironment request. The method always
@@ -238,7 +238,7 @@ func (client ClusterVersionsClient) ListPreparer(ctx context.Context, location s
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-02-01"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -254,8 +254,8 @@ func (client ClusterVersionsClient) ListPreparer(ctx context.Context, location s
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ClusterVersionsClient) ListSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// ListResponder handles the response to the List request. The method always
@@ -315,7 +315,7 @@ func (client ClusterVersionsClient) ListByEnvironmentPreparer(ctx context.Contex
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2018-02-01"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -331,8 +331,8 @@ func (client ClusterVersionsClient) ListByEnvironmentPreparer(ctx context.Contex
// ListByEnvironmentSender sends the ListByEnvironment request. The method will close the
// http.Response Body if it receives an error.
func (client ClusterVersionsClient) ListByEnvironmentSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// ListByEnvironmentResponder handles the response to the ListByEnvironment request. The method always
diff --git a/services/servicefabric/mgmt/2019-03-01/servicefabric/models.go b/services/servicefabric/mgmt/2019-03-01/servicefabric/models.go
index 3a025711071e..8c5890118618 100644
--- a/services/servicefabric/mgmt/2019-03-01/servicefabric/models.go
+++ b/services/servicefabric/mgmt/2019-03-01/servicefabric/models.go
@@ -46,6 +46,23 @@ func PossibleArmServicePackageActivationModeValues() []ArmServicePackageActivati
return []ArmServicePackageActivationMode{ExclusiveProcess, SharedProcess}
}
+// ArmUpgradeFailureAction enumerates the values for arm upgrade failure action.
+type ArmUpgradeFailureAction string
+
+const (
+ // Manual Indicates that a manual repair will need to be performed by the administrator if the upgrade
+ // fails. Service Fabric will not proceed to the next upgrade domain automatically.
+ Manual ArmUpgradeFailureAction = "Manual"
+ // Rollback Indicates that a rollback of the upgrade will be performed by Service Fabric if the upgrade
+ // fails.
+ Rollback ArmUpgradeFailureAction = "Rollback"
+)
+
+// PossibleArmUpgradeFailureActionValues returns an array of possible values for the ArmUpgradeFailureAction const type.
+func PossibleArmUpgradeFailureActionValues() []ArmUpgradeFailureAction {
+ return []ArmUpgradeFailureAction{Manual, Rollback}
+}
+
// ClusterState enumerates the values for cluster state.
type ClusterState string
@@ -385,15 +402,15 @@ func PossibleTypeValues() []Type {
type UpgradeMode string
const (
- // Automatic ...
- Automatic UpgradeMode = "Automatic"
- // Manual ...
- Manual UpgradeMode = "Manual"
+ // UpgradeModeAutomatic ...
+ UpgradeModeAutomatic UpgradeMode = "Automatic"
+ // UpgradeModeManual ...
+ UpgradeModeManual UpgradeMode = "Manual"
)
// PossibleUpgradeModeValues returns an array of possible values for the UpgradeMode const type.
func PossibleUpgradeModeValues() []UpgradeMode {
- return []UpgradeMode{Automatic, Manual}
+ return []UpgradeMode{UpgradeModeAutomatic, UpgradeModeManual}
}
// UpgradeMode1 enumerates the values for upgrade mode 1.
@@ -511,23 +528,23 @@ func (ahp ApplicationHealthPolicy) MarshalJSON() ([]byte, error) {
// can be used to limit the total consumption of this metric by the services of this application.
type ApplicationMetricDescription struct {
// Name - The name of the metric.
- Name *string `json:"Name,omitempty"`
+ Name *string `json:"name,omitempty"`
// MaximumCapacity - The maximum node capacity for Service Fabric application.
// This is the maximum Load for an instance of this application on a single node. Even if the capacity of node is greater than this value, Service Fabric will limit the total load of services within the application on each node to this value.
// If set to zero, capacity for this metric is unlimited on each node.
// When creating a new application with application capacity defined, the product of MaximumNodes and this value must always be smaller than or equal to TotalApplicationCapacity.
// When updating existing application with application capacity, the product of MaximumNodes and this value must always be smaller than or equal to TotalApplicationCapacity.
- MaximumCapacity *int64 `json:"MaximumCapacity,omitempty"`
+ MaximumCapacity *int64 `json:"maximumCapacity,omitempty"`
// ReservationCapacity - The node reservation capacity for Service Fabric application.
// This is the amount of load which is reserved on nodes which have instances of this application.
// If MinimumNodes is specified, then the product of these values will be the capacity reserved in the cluster for the application.
// If set to zero, no capacity is reserved for this metric.
// When setting application capacity or when updating application capacity; this value must be smaller than or equal to MaximumCapacity for each metric.
- ReservationCapacity *int64 `json:"ReservationCapacity,omitempty"`
+ ReservationCapacity *int64 `json:"reservationCapacity,omitempty"`
// TotalApplicationCapacity - The total metric capacity for Service Fabric application.
// This is the total metric capacity for this application in the cluster. Service Fabric will try to limit the sum of loads of services within the application to this value.
// When creating a new application with application capacity defined, the product of MaximumNodes and MaximumCapacity must always be smaller than or equal to this value.
- TotalApplicationCapacity *int64 `json:"TotalApplicationCapacity,omitempty"`
+ TotalApplicationCapacity *int64 `json:"totalApplicationCapacity,omitempty"`
}
// ApplicationResource the application resource.
@@ -541,8 +558,12 @@ type ApplicationResource struct {
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Azure resource type.
Type *string `json:"type,omitempty"`
- // Location - Azure resource location.
+ // Location - Deprecated in the new API; the resource location depends on the parent resource.
Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
}
// MarshalJSON is the custom marshaler for ApplicationResource.
@@ -554,6 +575,9 @@ func (ar ApplicationResource) MarshalJSON() ([]byte, error) {
if ar.Location != nil {
objectMap["location"] = ar.Location
}
+ if ar.Tags != nil {
+ objectMap["tags"] = ar.Tags
+ }
return json.Marshal(objectMap)
}
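
ApplicationResource picks up Tags and a READ-ONLY Etag, and the custom marshaler only puts the writable fields on the wire. A sketch showing that the etag is dropped when the resource is serialized:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	ar := servicefabric.ApplicationResource{
		Location: to.StringPtr("eastus"),
		Tags:     map[string]*string{"env": to.StringPtr("dev")},
		Etag:     to.StringPtr("some-etag"), // READ-ONLY: omitted by the custom marshaler
	}
	b, _ := json.Marshal(ar)
	fmt.Println(string(b)) // {"location":"eastus","tags":{"env":"dev"}}
}
```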
@@ -611,6 +635,24 @@ func (ar *ApplicationResource) UnmarshalJSON(body []byte) error {
}
ar.Location = &location
}
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ ar.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ ar.Etag = &etag
+ }
}
}
@@ -621,6 +663,8 @@ func (ar *ApplicationResource) UnmarshalJSON(body []byte) error {
type ApplicationResourceList struct {
autorest.Response `json:"-"`
Value *[]ApplicationResource `json:"value,omitempty"`
+ // NextLink - READ-ONLY; URL to get the next set of application list results if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
}
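
The list types now expose a READ-ONLY NextLink, but this change does not add an iterator, so following the link is left to the caller. A hedged sketch of handling one page:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric"
)

// names flattens one page of results and reports whether the service indicated
// that more pages exist via the new NextLink field.
func names(page servicefabric.ApplicationResourceList) ([]string, bool) {
	var out []string
	if page.Value != nil {
		for _, ar := range *page.Value {
			if ar.Name != nil {
				out = append(out, *ar.Name)
			}
		}
	}
	return out, page.NextLink != nil && *page.NextLink != ""
}

func main() {
	fmt.Println(names(servicefabric.ApplicationResourceList{}))
}
```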
// ApplicationResourceProperties the application resource properties.
@@ -677,7 +721,6 @@ func (arp ApplicationResourceProperties) MarshalJSON() ([]byte, error) {
// ApplicationResourceUpdate the application resource for patch operations.
type ApplicationResourceUpdate struct {
- autorest.Response `json:"-"`
// ApplicationResourceUpdateProperties - The application resource properties for patch operations.
*ApplicationResourceUpdateProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Azure resource identifier.
@@ -686,8 +729,12 @@ type ApplicationResourceUpdate struct {
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Azure resource type.
Type *string `json:"type,omitempty"`
- // Location - Azure resource location.
+ // Location - This will be deprecated in the new API; the resource location depends on the parent resource.
Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
}
// MarshalJSON is the custom marshaler for ApplicationResourceUpdate.
@@ -699,6 +746,9 @@ func (aru ApplicationResourceUpdate) MarshalJSON() ([]byte, error) {
if aru.Location != nil {
objectMap["location"] = aru.Location
}
+ if aru.Tags != nil {
+ objectMap["tags"] = aru.Tags
+ }
return json.Marshal(objectMap)
}
@@ -756,6 +806,24 @@ func (aru *ApplicationResourceUpdate) UnmarshalJSON(body []byte) error {
}
aru.Location = &location
}
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ aru.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ aru.Etag = &etag
+ }
}
}
@@ -807,30 +875,30 @@ func (arup ApplicationResourceUpdateProperties) MarshalJSON() ([]byte, error) {
return json.Marshal(objectMap)
}
-// ApplicationsCreateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
-type ApplicationsCreateFuture struct {
+// ApplicationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
+type ApplicationsCreateOrUpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
-func (future *ApplicationsCreateFuture) Result(client ApplicationsClient) (ar ApplicationResource, err error) {
+func (future *ApplicationsCreateOrUpdateFuture) Result(client ApplicationsClient) (ar ApplicationResource, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsCreateFuture", "Result", future.Response(), "Polling failure")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
- err = azure.NewAsyncOpIncompleteError("servicefabric.ApplicationsCreateFuture")
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ApplicationsCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if ar.Response.Response, err = future.GetResult(sender); err == nil && ar.Response.Response.StatusCode != http.StatusNoContent {
- ar, err = client.CreateResponder(ar.Response.Response)
+ ar, err = client.CreateOrUpdateResponder(ar.Response.Response)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsCreateFuture", "Result", ar.Response.Response, "Failure responding to request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsCreateOrUpdateFuture", "Result", ar.Response.Response, "Failure responding to request")
}
}
return
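
The Create futures are renamed to CreateOrUpdate futures to match the renamed client methods. A sketch of the long-running flow under v33; the NewApplicationsClient constructor and the environment-based authorizer are the usual generated/go-autorest helpers and are assumed here rather than shown in this diff:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Assumed: NewApplicationsClient is the usual generated constructor, and the
	// caller has credentials in the environment (AZURE_* variables).
	client := servicefabric.NewApplicationsClient("<subscription-id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// Real calls would also fill in the application properties (type name, version, ...).
	future, err := client.CreateOrUpdate(context.Background(), "myRG", "myCluster", "myApp",
		servicefabric.ApplicationResource{Tags: map[string]*string{"env": to.StringPtr("dev")}})
	if err != nil {
		log.Fatal(err)
	}
	// Block until the long-running operation finishes, then fetch the final resource.
	if err := future.WaitForCompletionRef(context.Background(), client.Client); err != nil {
		log.Fatal(err)
	}
	app, err := future.Result(client)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("provisioned application %s", to.String(app.Name))
}
```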
@@ -867,7 +935,7 @@ type ApplicationsUpdateFuture struct {
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
-func (future *ApplicationsUpdateFuture) Result(client ApplicationsClient) (aru ApplicationResourceUpdate, err error) {
+func (future *ApplicationsUpdateFuture) Result(client ApplicationsClient) (ar ApplicationResource, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -879,10 +947,10 @@ func (future *ApplicationsUpdateFuture) Result(client ApplicationsClient) (aru A
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if aru.Response.Response, err = future.GetResult(sender); err == nil && aru.Response.Response.StatusCode != http.StatusNoContent {
- aru, err = client.UpdateResponder(aru.Response.Response)
+ if ar.Response.Response, err = future.GetResult(sender); err == nil && ar.Response.Response.StatusCode != http.StatusNoContent {
+ ar, err = client.UpdateResponder(ar.Response.Response)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsUpdateFuture", "Result", aru.Response.Response, "Failure responding to request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationsUpdateFuture", "Result", ar.Response.Response, "Failure responding to request")
}
}
return
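
ApplicationsUpdateFuture.Result now resolves to the full ApplicationResource instead of ApplicationResourceUpdate, which also loses its embedded autorest.Response. A small helper showing the v33 call shape:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric"
)

// waitForUpdate shows the v33 shape of the call: Result yields the full
// ApplicationResource; in v32 it yielded an ApplicationResourceUpdate.
func waitForUpdate(ctx context.Context, client servicefabric.ApplicationsClient, f servicefabric.ApplicationsUpdateFuture) (servicefabric.ApplicationResource, error) {
	if err := f.WaitForCompletionRef(ctx, client.Client); err != nil {
		return servicefabric.ApplicationResource{}, err
	}
	return f.Result(client)
}

func main() {
	log.Println("see waitForUpdate for the v33 call shape")
}
```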
@@ -899,8 +967,12 @@ type ApplicationTypeResource struct {
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Azure resource type.
Type *string `json:"type,omitempty"`
- // Location - Azure resource location.
+ // Location - This will be deprecated in the new API; the resource location depends on the parent resource.
Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
}
// MarshalJSON is the custom marshaler for ApplicationTypeResource.
@@ -912,6 +984,9 @@ func (atr ApplicationTypeResource) MarshalJSON() ([]byte, error) {
if atr.Location != nil {
objectMap["location"] = atr.Location
}
+ if atr.Tags != nil {
+ objectMap["tags"] = atr.Tags
+ }
return json.Marshal(objectMap)
}
@@ -969,6 +1044,24 @@ func (atr *ApplicationTypeResource) UnmarshalJSON(body []byte) error {
}
atr.Location = &location
}
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ atr.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ atr.Etag = &etag
+ }
}
}
@@ -979,6 +1072,8 @@ func (atr *ApplicationTypeResource) UnmarshalJSON(body []byte) error {
type ApplicationTypeResourceList struct {
autorest.Response `json:"-"`
Value *[]ApplicationTypeResource `json:"value,omitempty"`
+ // NextLink - READ-ONLY; URL to get the next set of application type list results if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
}
// ApplicationTypeResourceProperties the application type name properties
@@ -1022,8 +1117,12 @@ type ApplicationTypeVersionResource struct {
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Azure resource type.
Type *string `json:"type,omitempty"`
- // Location - Azure resource location.
+ // Location - This will be deprecated in the new API; the resource location depends on the parent resource.
Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
}
// MarshalJSON is the custom marshaler for ApplicationTypeVersionResource.
@@ -1035,6 +1134,9 @@ func (atvr ApplicationTypeVersionResource) MarshalJSON() ([]byte, error) {
if atvr.Location != nil {
objectMap["location"] = atvr.Location
}
+ if atvr.Tags != nil {
+ objectMap["tags"] = atvr.Tags
+ }
return json.Marshal(objectMap)
}
@@ -1092,6 +1194,24 @@ func (atvr *ApplicationTypeVersionResource) UnmarshalJSON(body []byte) error {
}
atvr.Location = &location
}
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ atvr.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ atvr.Etag = &etag
+ }
}
}
@@ -1103,6 +1223,8 @@ func (atvr *ApplicationTypeVersionResource) UnmarshalJSON(body []byte) error {
type ApplicationTypeVersionResourceList struct {
autorest.Response `json:"-"`
Value *[]ApplicationTypeVersionResource `json:"value,omitempty"`
+ // NextLink - READ-ONLY; URL to get the next set of application type version list results if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
}
// ApplicationTypeVersionResourceProperties the properties of the application type version resource.
@@ -1124,30 +1246,30 @@ func (atvrp ApplicationTypeVersionResourceProperties) MarshalJSON() ([]byte, err
return json.Marshal(objectMap)
}
-// ApplicationTypeVersionsCreateFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
-type ApplicationTypeVersionsCreateFuture struct {
+// ApplicationTypeVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of
+// a long-running operation.
+type ApplicationTypeVersionsCreateOrUpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
-func (future *ApplicationTypeVersionsCreateFuture) Result(client ApplicationTypeVersionsClient) (atvr ApplicationTypeVersionResource, err error) {
+func (future *ApplicationTypeVersionsCreateOrUpdateFuture) Result(client ApplicationTypeVersionsClient) (atvr ApplicationTypeVersionResource, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsCreateFuture", "Result", future.Response(), "Polling failure")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
- err = azure.NewAsyncOpIncompleteError("servicefabric.ApplicationTypeVersionsCreateFuture")
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ApplicationTypeVersionsCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if atvr.Response.Response, err = future.GetResult(sender); err == nil && atvr.Response.Response.StatusCode != http.StatusNoContent {
- atvr, err = client.CreateResponder(atvr.Response.Response)
+ atvr, err = client.CreateOrUpdateResponder(atvr.Response.Response)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsCreateFuture", "Result", atvr.Response.Response, "Failure responding to request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ApplicationTypeVersionsCreateOrUpdateFuture", "Result", atvr.Response.Response, "Failure responding to request")
}
}
return
@@ -1179,11 +1301,11 @@ func (future *ApplicationTypeVersionsDeleteFuture) Result(client ApplicationType
// ApplicationUpgradePolicy describes the policy for a monitored application upgrade.
type ApplicationUpgradePolicy struct {
// UpgradeReplicaSetCheckTimeout - The maximum amount of time to block processing of an upgrade domain and prevent loss of availability when there are unexpected issues. When this timeout expires, processing of the upgrade domain will proceed regardless of availability loss issues. The timeout is reset at the start of each upgrade domain. Valid values are between 0 and 4294967295 inclusive. (unsigned 32-bit integer).
- UpgradeReplicaSetCheckTimeout *int64 `json:"upgradeReplicaSetCheckTimeout,omitempty"`
+ UpgradeReplicaSetCheckTimeout *string `json:"upgradeReplicaSetCheckTimeout,omitempty"`
// ForceRestart - If true, then processes are forcefully restarted during upgrade even when the code version has not changed (the upgrade only changes configuration or data).
ForceRestart *bool `json:"forceRestart,omitempty"`
// RollingUpgradeMonitoringPolicy - The policy used for monitoring the application upgrade
- RollingUpgradeMonitoringPolicy *RollingUpgradeMonitoringPolicy `json:"rollingUpgradeMonitoringPolicy,omitempty"`
+ RollingUpgradeMonitoringPolicy *ArmRollingUpgradeMonitoringPolicy `json:"rollingUpgradeMonitoringPolicy,omitempty"`
// ApplicationHealthPolicy - Defines a health policy used to evaluate the health of an application or one of its children entities.
ApplicationHealthPolicy *ArmApplicationHealthPolicy `json:"applicationHealthPolicy,omitempty"`
}
@@ -1192,37 +1314,64 @@ type ApplicationUpgradePolicy struct {
// of its children entities.
type ArmApplicationHealthPolicy struct {
// ConsiderWarningAsError - Indicates whether warnings are treated with the same severity as errors.
- ConsiderWarningAsError *bool `json:"ConsiderWarningAsError,omitempty"`
+ ConsiderWarningAsError *bool `json:"considerWarningAsError,omitempty"`
// MaxPercentUnhealthyDeployedApplications - The maximum allowed percentage of unhealthy deployed applications. Allowed values are Byte values from zero to 100.
// The percentage represents the maximum tolerated percentage of deployed applications that can be unhealthy before the application is considered in error.
// This is calculated by dividing the number of unhealthy deployed applications over the number of nodes where the application is currently deployed on in the cluster.
// The computation rounds up to tolerate one failure on small numbers of nodes. Default percentage is zero.
- MaxPercentUnhealthyDeployedApplications *int32 `json:"MaxPercentUnhealthyDeployedApplications,omitempty"`
+ MaxPercentUnhealthyDeployedApplications *int32 `json:"maxPercentUnhealthyDeployedApplications,omitempty"`
// DefaultServiceTypeHealthPolicy - The health policy used by default to evaluate the health of a service type.
- DefaultServiceTypeHealthPolicy *ServiceTypeHealthPolicy `json:"DefaultServiceTypeHealthPolicy,omitempty"`
+ DefaultServiceTypeHealthPolicy *ArmServiceTypeHealthPolicy `json:"defaultServiceTypeHealthPolicy,omitempty"`
// ServiceTypeHealthPolicyMap - The map with service type health policy per service type name. The map is empty by default.
- ServiceTypeHealthPolicyMap map[string]*ServiceTypeHealthPolicy `json:"ServiceTypeHealthPolicyMap"`
+ ServiceTypeHealthPolicyMap map[string]*ArmServiceTypeHealthPolicy `json:"serviceTypeHealthPolicyMap"`
}
// MarshalJSON is the custom marshaler for ArmApplicationHealthPolicy.
func (aahp ArmApplicationHealthPolicy) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if aahp.ConsiderWarningAsError != nil {
- objectMap["ConsiderWarningAsError"] = aahp.ConsiderWarningAsError
+ objectMap["considerWarningAsError"] = aahp.ConsiderWarningAsError
}
if aahp.MaxPercentUnhealthyDeployedApplications != nil {
- objectMap["MaxPercentUnhealthyDeployedApplications"] = aahp.MaxPercentUnhealthyDeployedApplications
+ objectMap["maxPercentUnhealthyDeployedApplications"] = aahp.MaxPercentUnhealthyDeployedApplications
}
if aahp.DefaultServiceTypeHealthPolicy != nil {
- objectMap["DefaultServiceTypeHealthPolicy"] = aahp.DefaultServiceTypeHealthPolicy
+ objectMap["defaultServiceTypeHealthPolicy"] = aahp.DefaultServiceTypeHealthPolicy
}
if aahp.ServiceTypeHealthPolicyMap != nil {
- objectMap["ServiceTypeHealthPolicyMap"] = aahp.ServiceTypeHealthPolicyMap
+ objectMap["serviceTypeHealthPolicyMap"] = aahp.ServiceTypeHealthPolicyMap
}
return json.Marshal(objectMap)
}
-// AvailableOperationDisplay operation supported by Service Fabric resource provider
+// ArmRollingUpgradeMonitoringPolicy the policy used for monitoring the application upgrade
+type ArmRollingUpgradeMonitoringPolicy struct {
+ // FailureAction - The action to take when a monitored upgrade fails. Possible values include: 'Rollback', 'Manual'
+ FailureAction ArmUpgradeFailureAction `json:"failureAction,omitempty"`
+ // HealthCheckWaitDuration - The amount of time to wait after completing an upgrade domain before applying health policies. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
+ HealthCheckWaitDuration *string `json:"healthCheckWaitDuration,omitempty"`
+ // HealthCheckStableDuration - The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
+ HealthCheckStableDuration *string `json:"healthCheckStableDuration,omitempty"`
+ // HealthCheckRetryTimeout - The amount of time to retry health evaluation when the application or cluster is unhealthy before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
+ HealthCheckRetryTimeout *string `json:"healthCheckRetryTimeout,omitempty"`
+ // UpgradeTimeout - The amount of time the overall upgrade has to complete before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
+ UpgradeTimeout *string `json:"upgradeTimeout,omitempty"`
+ // UpgradeDomainTimeout - The amount of time each upgrade domain has to complete before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
+ UpgradeDomainTimeout *string `json:"upgradeDomainTimeout,omitempty"`
+}
+
+// ArmServiceTypeHealthPolicy represents the health policy used to evaluate the health of services
+// belonging to a service type.
+type ArmServiceTypeHealthPolicy struct {
+ // MaxPercentUnhealthyServices - The maximum percentage of services allowed to be unhealthy before your application is considered in error.
+ MaxPercentUnhealthyServices *int32 `json:"maxPercentUnhealthyServices,omitempty"`
+ // MaxPercentUnhealthyPartitionsPerService - The maximum percentage of partitions per service allowed to be unhealthy before your application is considered in error.
+ MaxPercentUnhealthyPartitionsPerService *int32 `json:"maxPercentUnhealthyPartitionsPerService,omitempty"`
+ // MaxPercentUnhealthyReplicasPerPartition - The maximum percentage of replicas per partition allowed to be unhealthy before your application is considered in error.
+ MaxPercentUnhealthyReplicasPerPartition *int32 `json:"maxPercentUnhealthyReplicasPerPartition,omitempty"`
+}
+
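
ApplicationUpgradePolicy now takes its check timeout as a string and points at the new Arm-prefixed monitoring and health policy types. A sketch that builds a policy with the new types; the Rollback constant name is assumed from the doc comment above:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	policy := servicefabric.ApplicationUpgradePolicy{
		UpgradeReplicaSetCheckTimeout: to.StringPtr("600"), // now a *string (was *int64 in v32)
		ForceRestart:                  to.BoolPtr(false),
		RollingUpgradeMonitoringPolicy: &servicefabric.ArmRollingUpgradeMonitoringPolicy{
			FailureAction:             servicefabric.Rollback, // assumed constant name, as listed in the doc comment
			HealthCheckWaitDuration:   to.StringPtr("PT0M"),   // ISO 8601 durations per the field comments
			HealthCheckStableDuration: to.StringPtr("PT5M"),
			HealthCheckRetryTimeout:   to.StringPtr("PT10M"),
			UpgradeTimeout:            to.StringPtr("PT1H"),
			UpgradeDomainTimeout:      to.StringPtr("PT20M"),
		},
		ApplicationHealthPolicy: &servicefabric.ArmApplicationHealthPolicy{
			ConsiderWarningAsError: to.BoolPtr(true),
			DefaultServiceTypeHealthPolicy: &servicefabric.ArmServiceTypeHealthPolicy{
				MaxPercentUnhealthyServices: to.Int32Ptr(0),
			},
		},
	}
	b, _ := json.MarshalIndent(policy, "", "  ")
	fmt.Println(string(b))
}
```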
+// AvailableOperationDisplay operation supported by the Service Fabric resource provider
type AvailableOperationDisplay struct {
// Provider - The name of the provider.
Provider *string `json:"provider,omitempty"`
@@ -1287,6 +1436,8 @@ type Cluster struct {
Location *string `json:"location,omitempty"`
// Tags - Azure resource tags.
Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
}
// MarshalJSON is the custom marshaler for Cluster.
@@ -1367,13 +1518,22 @@ func (c *Cluster) UnmarshalJSON(body []byte) error {
}
c.Tags = tags
}
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ c.Etag = &etag
+ }
}
}
return nil
}
-// ClusterCodeVersionsListResult the list results of the ServiceFabric runtime versions.
+// ClusterCodeVersionsListResult the list results of the Service Fabric runtime versions.
type ClusterCodeVersionsListResult struct {
autorest.Response `json:"-"`
Value *[]ClusterCodeVersionsResult `json:"value,omitempty"`
@@ -1381,7 +1541,7 @@ type ClusterCodeVersionsListResult struct {
NextLink *string `json:"nextLink,omitempty"`
}
-// ClusterCodeVersionsResult the result of the ServiceFabric runtime versions
+// ClusterCodeVersionsResult the result of the Service Fabric runtime versions
type ClusterCodeVersionsResult struct {
// ID - The identification of the result
ID *string `json:"id,omitempty"`
@@ -1542,6 +1702,8 @@ type ClusterProperties struct {
ClusterState ClusterState `json:"clusterState,omitempty"`
// DiagnosticsStorageAccountConfig - The storage account information for storing Service Fabric diagnostic logs.
DiagnosticsStorageAccountConfig *DiagnosticsStorageAccountConfig `json:"diagnosticsStorageAccountConfig,omitempty"`
+ // EventStoreServiceEnabled - Indicates if the event store service is enabled.
+ EventStoreServiceEnabled *bool `json:"eventStoreServiceEnabled,omitempty"`
// FabricSettings - The list of custom fabric settings to configure the cluster.
FabricSettings *[]SettingsSectionDescription `json:"fabricSettings,omitempty"`
// ManagementEndpoint - The http management endpoint of the cluster.
@@ -1567,7 +1729,7 @@ type ClusterProperties struct {
// UpgradeMode - The upgrade mode of the cluster when new Service Fabric runtime version is available.
// - Automatic - The cluster will be automatically upgraded to the latest Service Fabric runtime version as soon as it is available.
// - Manual - The cluster will not be automatically upgraded to the latest Service Fabric runtime version. The cluster is upgraded by setting the **clusterCodeVersion** property in the cluster resource.
- // . Possible values include: 'Automatic', 'Manual'
+ // . Possible values include: 'UpgradeModeAutomatic', 'UpgradeModeManual'
UpgradeMode UpgradeMode `json:"upgradeMode,omitempty"`
// VMImage - The VM image VMSS has been configured with. Generic names such as Windows or Linux can be used.
VMImage *string `json:"vmImage,omitempty"`
@@ -1588,6 +1750,8 @@ type ClusterPropertiesUpdateParameters struct {
ClientCertificateThumbprints *[]ClientCertificateThumbprint `json:"clientCertificateThumbprints,omitempty"`
// ClusterCodeVersion - The Service Fabric runtime version of the cluster. This property can only be set by the user when **upgradeMode** is set to 'Manual'. To get the list of available Service Fabric versions for new clusters use [ClusterVersion API](./ClusterVersion.md). To get the list of available versions for existing clusters use **availableClusterVersions**.
ClusterCodeVersion *string `json:"clusterCodeVersion,omitempty"`
+ // EventStoreServiceEnabled - Indicates if the event store service is enabled.
+ EventStoreServiceEnabled *bool `json:"eventStoreServiceEnabled,omitempty"`
// FabricSettings - The list of custom fabric settings to configure the cluster. This will overwrite the existing list.
FabricSettings *[]SettingsSectionDescription `json:"fabricSettings,omitempty"`
// NodeTypes - The list of node types in the cluster. This will overwrite the existing list.
@@ -1611,30 +1775,30 @@ type ClusterPropertiesUpdateParameters struct {
UpgradeMode UpgradeMode1 `json:"upgradeMode,omitempty"`
}
-// ClustersCreateFuture an abstraction for monitoring and retrieving the results of a long-running
+// ClustersCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
-type ClustersCreateFuture struct {
+type ClustersCreateOrUpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
-func (future *ClustersCreateFuture) Result(client ClustersClient) (c Cluster, err error) {
+func (future *ClustersCreateOrUpdateFuture) Result(client ClustersClient) (c Cluster, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ClustersCreateFuture", "Result", future.Response(), "Polling failure")
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
- err = azure.NewAsyncOpIncompleteError("servicefabric.ClustersCreateFuture")
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ClustersCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if c.Response.Response, err = future.GetResult(sender); err == nil && c.Response.Response.StatusCode != http.StatusNoContent {
- c, err = client.CreateResponder(c.Response.Response)
+ c, err = client.CreateOrUpdateResponder(c.Response.Response)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ClustersCreateFuture", "Result", c.Response.Response, "Failure responding to request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ClustersCreateOrUpdateFuture", "Result", c.Response.Response, "Failure responding to request")
}
}
return
@@ -1762,7 +1926,7 @@ func (cudhp ClusterUpgradeDeltaHealthPolicy) MarshalJSON() ([]byte, error) {
type ClusterUpgradePolicy struct {
// ForceRestart - If true, then processes are forcefully restarted during upgrade even when the code version has not changed (the upgrade only changes configuration or data).
ForceRestart *bool `json:"forceRestart,omitempty"`
- // UpgradeReplicaSetCheckTimeout - The maximum amount of time to block processing of an upgrade domain and revent loss of availability when there are unexpected issues. When this timeout expires, processing of the upgrade domain will proceed regardless of availability loss issues. The timeout is reset at the start of each upgrade domain. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
+ // UpgradeReplicaSetCheckTimeout - The maximum amount of time to block processing of an upgrade domain and prevent loss of availability when there are unexpected issues. When this timeout expires, processing of the upgrade domain will proceed regardless of availability loss issues. The timeout is reset at the start of each upgrade domain. The timeout can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
UpgradeReplicaSetCheckTimeout *string `json:"upgradeReplicaSetCheckTimeout,omitempty"`
// HealthCheckWaitDuration - The length of time to wait after completing an upgrade domain before performing health checks. The duration can be in either hh:mm:ss or in d.hh:mm:ss.ms format.
HealthCheckWaitDuration *string `json:"healthCheckWaitDuration,omitempty"`
@@ -1834,7 +1998,7 @@ type NamedPartitionSchemeDescription struct {
// Names - Array of size specified by the ‘Count’ parameter, for the names of the partitions.
Names *[]string `json:"Names,omitempty"`
// PartitionScheme - Possible values include: 'PartitionSchemePartitionSchemeDescription', 'PartitionSchemeNamed', 'PartitionSchemeSingleton', 'PartitionSchemeUniformInt64Range'
- PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"PartitionScheme,omitempty"`
+ PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"partitionScheme,omitempty"`
}
// MarshalJSON is the custom marshaler for NamedPartitionSchemeDescription.
@@ -1848,7 +2012,7 @@ func (npsd NamedPartitionSchemeDescription) MarshalJSON() ([]byte, error) {
objectMap["Names"] = npsd.Names
}
if npsd.PartitionScheme != "" {
- objectMap["PartitionScheme"] = npsd.PartitionScheme
+ objectMap["partitionScheme"] = npsd.PartitionScheme
}
return json.Marshal(objectMap)
}
@@ -1899,7 +2063,7 @@ type NodeTypeDescription struct {
DurabilityLevel DurabilityLevel `json:"durabilityLevel,omitempty"`
// ApplicationPorts - The range of ports from which the cluster assigns ports to Service Fabric applications.
ApplicationPorts *EndpointRangeDescription `json:"applicationPorts,omitempty"`
- // EphemeralPorts - The range of empheral ports that nodes in this node type should be configured with.
+ // EphemeralPorts - The range of ephemeral ports that nodes in this node type should be configured with.
EphemeralPorts *EndpointRangeDescription `json:"ephemeralPorts,omitempty"`
// IsPrimary - The node type on which system services will run. Only one node type should be marked as primary. Primary node type cannot be deleted or changed for existing clusters.
IsPrimary *bool `json:"isPrimary,omitempty"`
@@ -1948,10 +2112,11 @@ func (ntd NodeTypeDescription) MarshalJSON() ([]byte, error) {
return json.Marshal(objectMap)
}
-// OperationListResult describes the result of the request to list Service Fabric operations.
+// OperationListResult describes the result of the request to list Service Fabric resource provider
+// operations.
type OperationListResult struct {
autorest.Response `json:"-"`
- // Value - List of Service Fabric operations supported by the Microsoft.ServiceFabric resource provider.
+ // Value - List of operations supported by the Service Fabric resource provider.
Value *[]OperationResult `json:"value,omitempty"`
// NextLink - READ-ONLY; URL to get the next set of operation list results if there are any.
NextLink *string `json:"nextLink,omitempty"`
@@ -2117,7 +2282,7 @@ type BasicPartitionSchemeDescription interface {
// PartitionSchemeDescription describes how the service is partitioned.
type PartitionSchemeDescription struct {
// PartitionScheme - Possible values include: 'PartitionSchemePartitionSchemeDescription', 'PartitionSchemeNamed', 'PartitionSchemeSingleton', 'PartitionSchemeUniformInt64Range'
- PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"PartitionScheme,omitempty"`
+ PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"partitionScheme,omitempty"`
}
func unmarshalBasicPartitionSchemeDescription(body []byte) (BasicPartitionSchemeDescription, error) {
@@ -2127,7 +2292,7 @@ func unmarshalBasicPartitionSchemeDescription(body []byte) (BasicPartitionScheme
return nil, err
}
- switch m["PartitionScheme"] {
+ switch m["partitionScheme"] {
case string(PartitionSchemeNamed):
var npsd NamedPartitionSchemeDescription
err := json.Unmarshal(body, &npsd)
@@ -2170,7 +2335,7 @@ func (psd PartitionSchemeDescription) MarshalJSON() ([]byte, error) {
psd.PartitionScheme = PartitionSchemePartitionSchemeDescription
objectMap := make(map[string]interface{})
if psd.PartitionScheme != "" {
- objectMap["PartitionScheme"] = psd.PartitionScheme
+ objectMap["partitionScheme"] = psd.PartitionScheme
}
return json.Marshal(objectMap)
}
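
The polymorphic partition scheme types now use the lower camel `partitionScheme` key as their discriminator, both when the custom marshalers stamp the concrete kind and when unmarshalBasicPartitionSchemeDescription switches on it. A quick sketch of the marshal side:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric"
)

func main() {
	// The custom marshaler stamps the concrete kind under the renamed key,
	// e.g. {"partitionScheme":"Singleton"}.
	b, err := json.Marshal(servicefabric.SingletonPartitionSchemeDescription{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```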
@@ -2208,8 +2373,24 @@ type ProxyResource struct {
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Azure resource type.
Type *string `json:"type,omitempty"`
- // Location - Azure resource location.
+ // Location - This will be deprecated in the new API; the resource location depends on the parent resource.
Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ProxyResource.
+func (pr ProxyResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if pr.Location != nil {
+ objectMap["location"] = pr.Location
+ }
+ if pr.Tags != nil {
+ objectMap["tags"] = pr.Tags
+ }
+ return json.Marshal(objectMap)
}
// Resource the resource model definition.
@@ -2224,6 +2405,8 @@ type Resource struct {
Location *string `json:"location,omitempty"`
// Tags - Azure resource tags.
Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
}
// MarshalJSON is the custom marshaler for Resource.
@@ -2238,20 +2421,6 @@ func (r Resource) MarshalJSON() ([]byte, error) {
return json.Marshal(objectMap)
}
-// RollingUpgradeMonitoringPolicy the policy used for monitoring the application upgrade
-type RollingUpgradeMonitoringPolicy struct {
- // HealthCheckWaitDuration - The amount of time to wait after completing an upgrade domain before applying health policies. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
- HealthCheckWaitDuration *string `json:"healthCheckWaitDuration,omitempty"`
- // HealthCheckStableDuration - The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
- HealthCheckStableDuration *string `json:"healthCheckStableDuration,omitempty"`
- // HealthCheckRetryTimeout - The amount of time to retry health evaluation when the application or cluster is unhealthy before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
- HealthCheckRetryTimeout *string `json:"healthCheckRetryTimeout,omitempty"`
- // UpgradeTimeout - The amount of time the overall upgrade has to complete before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
- UpgradeTimeout *string `json:"upgradeTimeout,omitempty"`
- // UpgradeDomainTimeout - The amount of time each upgrade domain has to complete before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds.
- UpgradeDomainTimeout *string `json:"upgradeDomainTimeout,omitempty"`
-}
-
// ServerCertificateCommonName describes the server certificate details using common name.
type ServerCertificateCommonName struct {
// CertificateCommonName - The common name of the server certificate.
@@ -2272,23 +2441,23 @@ type ServerCertificateCommonNames struct {
// ServiceCorrelationDescription creates a particular correlation between services.
type ServiceCorrelationDescription struct {
// Scheme - The ServiceCorrelationScheme which describes the relationship between this service and the service specified via ServiceName. Possible values include: 'ServiceCorrelationSchemeInvalid', 'ServiceCorrelationSchemeAffinity', 'ServiceCorrelationSchemeAlignedAffinity', 'ServiceCorrelationSchemeNonAlignedAffinity'
- Scheme ServiceCorrelationScheme `json:"Scheme,omitempty"`
+ Scheme ServiceCorrelationScheme `json:"scheme,omitempty"`
// ServiceName - The name of the service that the correlation relationship is established with.
- ServiceName *string `json:"ServiceName,omitempty"`
+ ServiceName *string `json:"serviceName,omitempty"`
}
// ServiceLoadMetricDescription specifies a metric to load balance a service during runtime.
type ServiceLoadMetricDescription struct {
// Name - The name of the metric. If the service chooses to report load during runtime, the load metric name should match the name that is specified in Name exactly. Note that metric names are case sensitive.
- Name *string `json:"Name,omitempty"`
+ Name *string `json:"name,omitempty"`
// Weight - The service load metric relative weight, compared to other metrics configured for this service, as a number. Possible values include: 'ServiceLoadMetricWeightZero', 'ServiceLoadMetricWeightLow', 'ServiceLoadMetricWeightMedium', 'ServiceLoadMetricWeightHigh'
- Weight ServiceLoadMetricWeight `json:"Weight,omitempty"`
+ Weight ServiceLoadMetricWeight `json:"weight,omitempty"`
// PrimaryDefaultLoad - Used only for Stateful services. The default amount of load, as a number, that this service creates for this metric when it is a Primary replica.
- PrimaryDefaultLoad *int32 `json:"PrimaryDefaultLoad,omitempty"`
+ PrimaryDefaultLoad *int32 `json:"primaryDefaultLoad,omitempty"`
// SecondaryDefaultLoad - Used only for Stateful services. The default amount of load, as a number, that this service creates for this metric when it is a Secondary replica.
- SecondaryDefaultLoad *int32 `json:"SecondaryDefaultLoad,omitempty"`
+ SecondaryDefaultLoad *int32 `json:"secondaryDefaultLoad,omitempty"`
// DefaultLoad - Used only for Stateless services. The default amount of load, as a number, that this service creates for this metric.
- DefaultLoad *int32 `json:"DefaultLoad,omitempty"`
+ DefaultLoad *int32 `json:"defaultLoad,omitempty"`
}
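
ServiceLoadMetricDescription also moves to lower camel case JSON keys. A sketch of a stateful-service metric using the weight constants listed above:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	metric := servicefabric.ServiceLoadMetricDescription{
		Name:                 to.StringPtr("MemoryInMb"), // must match the name reported at runtime, case sensitive
		Weight:               servicefabric.ServiceLoadMetricWeightHigh,
		PrimaryDefaultLoad:   to.Int32Ptr(512), // stateful services only
		SecondaryDefaultLoad: to.Int32Ptr(256),
	}
	b, _ := json.Marshal(metric)
	fmt.Println(string(b)) // keys are now lower camel case: "name", "weight", ...
}
```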
// BasicServicePlacementPolicyDescription describes the policy to be used for placement of a Service Fabric service.
@@ -2366,8 +2535,12 @@ type ServiceResource struct {
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Azure resource type.
Type *string `json:"type,omitempty"`
- // Location - Azure resource location.
+ // Location - This will be deprecated in the new API; the resource location depends on the parent resource.
Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
}
// MarshalJSON is the custom marshaler for ServiceResource.
@@ -2377,6 +2550,9 @@ func (sr ServiceResource) MarshalJSON() ([]byte, error) {
if sr.Location != nil {
objectMap["location"] = sr.Location
}
+ if sr.Tags != nil {
+ objectMap["tags"] = sr.Tags
+ }
return json.Marshal(objectMap)
}
@@ -2433,6 +2609,24 @@ func (sr *ServiceResource) UnmarshalJSON(body []byte) error {
}
sr.Location = &location
}
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ sr.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ sr.Etag = &etag
+ }
}
}
@@ -2443,6 +2637,8 @@ func (sr *ServiceResource) UnmarshalJSON(body []byte) error {
type ServiceResourceList struct {
autorest.Response `json:"-"`
Value *[]ServiceResource `json:"value,omitempty"`
+ // NextLink - READ-ONLY; URL to get the next set of service list results if there are any.
+ NextLink *string `json:"nextLink,omitempty"`
}
// BasicServiceResourceProperties the service resource properties.
@@ -2747,7 +2943,6 @@ func (srpb *ServiceResourcePropertiesBase) UnmarshalJSON(body []byte) error {
// ServiceResourceUpdate the service resource for patch operations.
type ServiceResourceUpdate struct {
- autorest.Response `json:"-"`
// BasicServiceResourceUpdateProperties - The service resource properties for patch operations.
BasicServiceResourceUpdateProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Azure resource identifier.
@@ -2756,8 +2951,12 @@ type ServiceResourceUpdate struct {
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Azure resource type.
Type *string `json:"type,omitempty"`
- // Location - Azure resource location.
+ // Location - This will be deprecated in the new API; the resource location depends on the parent resource.
Location *string `json:"location,omitempty"`
+ // Tags - Azure resource tags.
+ Tags map[string]*string `json:"tags"`
+ // Etag - READ-ONLY; Azure resource etag.
+ Etag *string `json:"etag,omitempty"`
}
// MarshalJSON is the custom marshaler for ServiceResourceUpdate.
@@ -2767,6 +2966,9 @@ func (sru ServiceResourceUpdate) MarshalJSON() ([]byte, error) {
if sru.Location != nil {
objectMap["location"] = sru.Location
}
+ if sru.Tags != nil {
+ objectMap["tags"] = sru.Tags
+ }
return json.Marshal(objectMap)
}
@@ -2823,6 +3025,24 @@ func (sru *ServiceResourceUpdate) UnmarshalJSON(body []byte) error {
}
sru.Location = &location
}
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ sru.Tags = tags
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ sru.Etag = &etag
+ }
}
}
@@ -3006,30 +3226,30 @@ func (srup *ServiceResourceUpdateProperties) UnmarshalJSON(body []byte) error {
return nil
}
-// ServicesCreateFuture an abstraction for monitoring and retrieving the results of a long-running
+// ServicesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
-type ServicesCreateFuture struct {
+type ServicesCreateOrUpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
-func (future *ServicesCreateFuture) Result(client ServicesClient) (sr ServiceResource, err error) {
+func (future *ServicesCreateOrUpdateFuture) Result(client ServicesClient) (sr ServiceResource, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ServicesCreateFuture", "Result", future.Response(), "Polling failure")
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
- err = azure.NewAsyncOpIncompleteError("servicefabric.ServicesCreateFuture")
+ err = azure.NewAsyncOpIncompleteError("servicefabric.ServicesCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if sr.Response.Response, err = future.GetResult(sender); err == nil && sr.Response.Response.StatusCode != http.StatusNoContent {
- sr, err = client.CreateResponder(sr.Response.Response)
+ sr, err = client.CreateOrUpdateResponder(sr.Response.Response)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ServicesCreateFuture", "Result", sr.Response.Response, "Failure responding to request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesCreateOrUpdateFuture", "Result", sr.Response.Response, "Failure responding to request")
}
}
return
@@ -3066,7 +3286,7 @@ type ServicesUpdateFuture struct {
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
-func (future *ServicesUpdateFuture) Result(client ServicesClient) (sru ServiceResourceUpdate, err error) {
+func (future *ServicesUpdateFuture) Result(client ServicesClient) (sr ServiceResource, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
@@ -3078,10 +3298,10 @@ func (future *ServicesUpdateFuture) Result(client ServicesClient) (sru ServiceRe
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
- if sru.Response.Response, err = future.GetResult(sender); err == nil && sru.Response.Response.StatusCode != http.StatusNoContent {
- sru, err = client.UpdateResponder(sru.Response.Response)
+ if sr.Response.Response, err = future.GetResult(sender); err == nil && sr.Response.Response.StatusCode != http.StatusNoContent {
+ sr, err = client.UpdateResponder(sr.Response.Response)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ServicesUpdateFuture", "Result", sru.Response.Response, "Failure responding to request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesUpdateFuture", "Result", sr.Response.Response, "Failure responding to request")
}
}
return
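
ServicesUpdateFuture.Result changes the same way as the application future: it now yields a ServiceResource rather than a ServiceResourceUpdate. A matching helper:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric"
)

// waitForServiceUpdate reflects the v33 shape: the update future resolves to the
// service resource itself (v32 returned a ServiceResourceUpdate).
func waitForServiceUpdate(ctx context.Context, client servicefabric.ServicesClient, f servicefabric.ServicesUpdateFuture) (servicefabric.ServiceResource, error) {
	if err := f.WaitForCompletionRef(ctx, client.Client); err != nil {
		return servicefabric.ServiceResource{}, err
	}
	return f.Result(client)
}

func main() {
	log.Println("see waitForServiceUpdate for the v33 call shape")
}
```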
@@ -3123,7 +3343,7 @@ type SettingsSectionDescription struct {
// non-partitioned service.
type SingletonPartitionSchemeDescription struct {
// PartitionScheme - Possible values include: 'PartitionSchemePartitionSchemeDescription', 'PartitionSchemeNamed', 'PartitionSchemeSingleton', 'PartitionSchemeUniformInt64Range'
- PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"PartitionScheme,omitempty"`
+ PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"partitionScheme,omitempty"`
}
// MarshalJSON is the custom marshaler for SingletonPartitionSchemeDescription.
@@ -3131,7 +3351,7 @@ func (spsd SingletonPartitionSchemeDescription) MarshalJSON() ([]byte, error) {
spsd.PartitionScheme = PartitionSchemeSingleton
objectMap := make(map[string]interface{})
if spsd.PartitionScheme != "" {
- objectMap["PartitionScheme"] = spsd.PartitionScheme
+ objectMap["partitionScheme"] = spsd.PartitionScheme
}
return json.Marshal(objectMap)
}
@@ -3971,7 +4191,7 @@ type UniformInt64RangePartitionSchemeDescription struct {
// should be split between the partition ‘Count’
HighKey *string `json:"HighKey,omitempty"`
// PartitionScheme - Possible values include: 'PartitionSchemePartitionSchemeDescription', 'PartitionSchemeNamed', 'PartitionSchemeSingleton', 'PartitionSchemeUniformInt64Range'
- PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"PartitionScheme,omitempty"`
+ PartitionScheme PartitionSchemeBasicPartitionSchemeDescription `json:"partitionScheme,omitempty"`
}
// MarshalJSON is the custom marshaler for UniformInt64RangePartitionSchemeDescription.
@@ -3988,7 +4208,7 @@ func (ui6rpsd UniformInt64RangePartitionSchemeDescription) MarshalJSON() ([]byte
objectMap["HighKey"] = ui6rpsd.HighKey
}
if ui6rpsd.PartitionScheme != "" {
- objectMap["PartitionScheme"] = ui6rpsd.PartitionScheme
+ objectMap["partitionScheme"] = ui6rpsd.PartitionScheme
}
return json.Marshal(objectMap)
}
diff --git a/services/servicefabric/mgmt/2019-03-01/servicefabric/operations.go b/services/servicefabric/mgmt/2019-03-01/servicefabric/operations.go
index aa4d64183cf9..b73bec1e42c9 100644
--- a/services/servicefabric/mgmt/2019-03-01/servicefabric/operations.go
+++ b/services/servicefabric/mgmt/2019-03-01/servicefabric/operations.go
@@ -76,7 +76,7 @@ func (client OperationsClient) List(ctx context.Context) (result OperationListRe
// ListPreparer prepares the List request.
func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
- const APIVersion = ""
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -92,8 +92,8 @@ func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request,
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+ return autorest.SendWithSender(client, req, sd...)
}
// ListResponder handles the response to the List request. The method always
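
The senders now ask the request context for send decorators via autorest.GetSendDecorators, falling back to the default retry decorator, which lines up with the go-autorest v13 bump in Gopkg.lock. A sketch of injecting a custom decorator; WithSendDecorators is assumed to be the matching go-autorest v13 helper:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// logRequests is a custom SendDecorator that prints each outgoing request.
func logRequests(s autorest.Sender) autorest.Sender {
	return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
		fmt.Println("sending", r.Method, r.URL)
		return s.Do(r)
	})
}

func main() {
	// Assumed go-autorest v13 helper: WithSendDecorators stores decorators on the
	// context; the regenerated senders retrieve them with GetSendDecorators,
	// replacing the default retry decorator for calls made with this ctx.
	ctx := autorest.WithSendDecorators(context.Background(), []autorest.SendDecorator{logRequests})
	_ = ctx // pass ctx to any client call, e.g. OperationsClient.List(ctx)
}
```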
diff --git a/services/servicefabric/mgmt/2019-03-01/servicefabric/servicefabricapi/interfaces.go b/services/servicefabric/mgmt/2019-03-01/servicefabric/servicefabricapi/interfaces.go
index 2163b5a64f60..c51278701ea2 100644
--- a/services/servicefabric/mgmt/2019-03-01/servicefabric/servicefabricapi/interfaces.go
+++ b/services/servicefabric/mgmt/2019-03-01/servicefabric/servicefabricapi/interfaces.go
@@ -25,7 +25,7 @@ import (
// ClustersClientAPI contains the set of methods on the ClustersClient type.
type ClustersClientAPI interface {
- Create(ctx context.Context, resourceGroupName string, clusterName string, parameters servicefabric.Cluster) (result servicefabric.ClustersCreateFuture, err error)
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, parameters servicefabric.Cluster) (result servicefabric.ClustersCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, clusterName string) (result autorest.Response, err error)
Get(ctx context.Context, resourceGroupName string, clusterName string) (result servicefabric.Cluster, err error)
List(ctx context.Context) (result servicefabric.ClusterListResult, err error)
@@ -54,7 +54,7 @@ var _ OperationsClientAPI = (*servicefabric.OperationsClient)(nil)
// ApplicationTypesClientAPI contains the set of methods on the ApplicationTypesClient type.
type ApplicationTypesClientAPI interface {
- Create(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, parameters servicefabric.ApplicationTypeResource) (result servicefabric.ApplicationTypeResource, err error)
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, parameters servicefabric.ApplicationTypeResource) (result servicefabric.ApplicationTypeResource, err error)
Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (result servicefabric.ApplicationTypesDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (result servicefabric.ApplicationTypeResource, err error)
List(ctx context.Context, resourceGroupName string, clusterName string) (result servicefabric.ApplicationTypeResourceList, err error)
@@ -64,7 +64,7 @@ var _ ApplicationTypesClientAPI = (*servicefabric.ApplicationTypesClient)(nil)
// ApplicationTypeVersionsClientAPI contains the set of methods on the ApplicationTypeVersionsClient type.
type ApplicationTypeVersionsClientAPI interface {
- Create(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, parameters servicefabric.ApplicationTypeVersionResource) (result servicefabric.ApplicationTypeVersionsCreateFuture, err error)
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, parameters servicefabric.ApplicationTypeVersionResource) (result servicefabric.ApplicationTypeVersionsCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string) (result servicefabric.ApplicationTypeVersionsDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string) (result servicefabric.ApplicationTypeVersionResource, err error)
List(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string) (result servicefabric.ApplicationTypeVersionResourceList, err error)
@@ -74,7 +74,7 @@ var _ ApplicationTypeVersionsClientAPI = (*servicefabric.ApplicationTypeVersions
// ApplicationsClientAPI contains the set of methods on the ApplicationsClient type.
type ApplicationsClientAPI interface {
- Create(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters servicefabric.ApplicationResource) (result servicefabric.ApplicationsCreateFuture, err error)
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, parameters servicefabric.ApplicationResource) (result servicefabric.ApplicationsCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (result servicefabric.ApplicationsDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (result servicefabric.ApplicationResource, err error)
List(ctx context.Context, resourceGroupName string, clusterName string) (result servicefabric.ApplicationResourceList, err error)
@@ -85,7 +85,7 @@ var _ ApplicationsClientAPI = (*servicefabric.ApplicationsClient)(nil)
// ServicesClientAPI contains the set of methods on the ServicesClient type.
type ServicesClientAPI interface {
- Create(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters servicefabric.ServiceResource) (result servicefabric.ServicesCreateFuture, err error)
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters servicefabric.ServiceResource) (result servicefabric.ServicesCreateOrUpdateFuture, err error)
Delete(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string) (result servicefabric.ServicesDeleteFuture, err error)
Get(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string) (result servicefabric.ServiceResource, err error)
List(ctx context.Context, resourceGroupName string, clusterName string, applicationName string) (result servicefabric.ServiceResourceList, err error)
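
The servicefabricapi interfaces are regenerated with the CreateOrUpdate names, which keeps them useful as seams for testing. A sketch of code written against ServicesClientAPI instead of the concrete client:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric"
	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric/servicefabricapi"
)

// Compile-time check mirroring the generated assertions in interfaces.go.
var _ servicefabricapi.ServicesClientAPI = (*servicefabric.ServicesClient)(nil)

// countServices depends on the interface, so tests can pass a hand-written fake
// instead of a real ServicesClient.
func countServices(ctx context.Context, c servicefabricapi.ServicesClientAPI, rg, cluster, app string) (int, error) {
	list, err := c.List(ctx, rg, cluster, app)
	if err != nil {
		return 0, err
	}
	if list.Value == nil {
		return 0, nil
	}
	return len(*list.Value), nil
}

func main() {
	fmt.Println("countServices accepts any ServicesClientAPI implementation, real or fake")
}
```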
diff --git a/services/servicefabric/mgmt/2019-03-01/servicefabric/services.go b/services/servicefabric/mgmt/2019-03-01/servicefabric/services.go
index 10016d12bd83..1ef4a2539bbd 100644
--- a/services/servicefabric/mgmt/2019-03-01/servicefabric/services.go
+++ b/services/servicefabric/mgmt/2019-03-01/servicefabric/services.go
@@ -40,16 +40,16 @@ func NewServicesClientWithBaseURI(baseURI string, subscriptionID string) Service
return ServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
-// Create create or update a Service Fabric service resource with the specified name.
+// CreateOrUpdate create or update a Service Fabric service resource with the specified name.
// Parameters:
// resourceGroupName - the name of the resource group.
// clusterName - the name of the cluster resource.
// applicationName - the name of the application resource.
// serviceName - the name of the service resource in the format of {applicationName}~{serviceName}.
// parameters - the service resource.
-func (client ServicesClient) Create(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters ServiceResource) (result ServicesCreateFuture, err error) {
+func (client ServicesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters ServiceResource) (result ServicesCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.Create")
+ ctx = tracing.StartSpan(ctx, fqdn+"/ServicesClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response() != nil {
@@ -58,23 +58,23 @@ func (client ServicesClient) Create(ctx context.Context, resourceGroupName strin
tracing.EndSpan(ctx, sc, err)
}()
}
- req, err := client.CreatePreparer(ctx, resourceGroupName, clusterName, applicationName, serviceName, parameters)
+ req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, clusterName, applicationName, serviceName, parameters)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "Create", nil, "Failure preparing request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
- result, err = client.CreateSender(req)
+ result, err = client.CreateOrUpdateSender(req)
if err != nil {
- err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "Create", result.Response(), "Failure sending request")
+ err = autorest.NewErrorWithError(err, "servicefabric.ServicesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
-// CreatePreparer prepares the Create request.
-func (client ServicesClient) CreatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters ServiceResource) (*http.Request, error) {
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ServicesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, clusterName string, applicationName string, serviceName string, parameters ServiceResource) (*http.Request, error) {
pathParameters := map[string]interface{}{
"applicationName": autorest.Encode("path", applicationName),
"clusterName": autorest.Encode("path", clusterName),
@@ -83,7 +83,7 @@ func (client ServicesClient) CreatePreparer(ctx context.Context, resourceGroupNa
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -98,12 +98,12 @@ func (client ServicesClient) CreatePreparer(ctx context.Context, resourceGroupNa
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
-// CreateSender sends the Create request. The method will close the
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
-func (client ServicesClient) CreateSender(req *http.Request) (future ServicesCreateFuture, err error) {
+func (client ServicesClient) CreateOrUpdateSender(req *http.Request) (future ServicesCreateOrUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
- resp, err = autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ resp, err = autorest.SendWithSender(client, req, sd...)
if err != nil {
return
}
@@ -111,9 +111,9 @@ func (client ServicesClient) CreateSender(req *http.Request) (future ServicesCre
return
}
-// CreateResponder handles the response to the Create request. The method always
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
-func (client ServicesClient) CreateResponder(resp *http.Response) (result ServiceResource, err error) {
+func (client ServicesClient) CreateOrUpdateResponder(resp *http.Response) (result ServiceResource, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
@@ -166,7 +166,7 @@ func (client ServicesClient) DeletePreparer(ctx context.Context, resourceGroupNa
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -182,9 +182,9 @@ func (client ServicesClient) DeletePreparer(ctx context.Context, resourceGroupNa
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ServicesClient) DeleteSender(req *http.Request) (future ServicesDeleteFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
- resp, err = autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ resp, err = autorest.SendWithSender(client, req, sd...)
if err != nil {
return
}
@@ -253,7 +253,7 @@ func (client ServicesClient) GetPreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -269,8 +269,8 @@ func (client ServicesClient) GetPreparer(ctx context.Context, resourceGroupName
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ServicesClient) GetSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// GetResponder handles the response to the Get request. The method always
@@ -333,7 +333,7 @@ func (client ServicesClient) ListPreparer(ctx context.Context, resourceGroupName
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -349,8 +349,8 @@ func (client ServicesClient) ListPreparer(ctx context.Context, resourceGroupName
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ServicesClient) ListSender(req *http.Request) (*http.Response, error) {
- return autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
}
// ListResponder handles the response to the List request. The method always
@@ -409,7 +409,7 @@ func (client ServicesClient) UpdatePreparer(ctx context.Context, resourceGroupNa
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
- const APIVersion = "2019-03-01-preview"
+ const APIVersion = "2019-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@@ -427,9 +427,9 @@ func (client ServicesClient) UpdatePreparer(ctx context.Context, resourceGroupNa
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client ServicesClient) UpdateSender(req *http.Request) (future ServicesUpdateFuture, err error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
- resp, err = autorest.SendWithSender(client, req,
- azure.DoRetryWithRegistration(client.Client))
+ resp, err = autorest.SendWithSender(client, req, sd...)
if err != nil {
return
}
@@ -439,7 +439,7 @@ func (client ServicesClient) UpdateSender(req *http.Request) (future ServicesUpd
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
-func (client ServicesClient) UpdateResponder(resp *http.Response) (result ServiceResourceUpdate, err error) {
+func (client ServicesClient) UpdateResponder(resp *http.Response) (result ServiceResource, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
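
The servicefabric 2019-03-01 hunks above rename the `ServicesClient.Create` family to `CreateOrUpdate`, return a `ServicesCreateOrUpdateFuture`, and move from the preview api-version to the GA one. Below is a minimal sketch of driving the renamed long-running operation with the usual `azure.Future` helpers; it assumes the client already carries an `Authorizer`, and the resource-group, cluster, application, and service names (plus the `ServiceResource` payload) are placeholders rather than values from this changelog.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/servicefabric/mgmt/2019-03-01/servicefabric"
)

// createOrUpdateService drives the renamed long-running operation end to end.
// The client is assumed to already carry an Authorizer; resource names and the
// ServiceResource payload are placeholders.
func createOrUpdateService(ctx context.Context, client servicefabric.ServicesClient, group, cluster, app string, params servicefabric.ServiceResource) error {
	// Create was renamed to CreateOrUpdate and now returns a ServicesCreateOrUpdateFuture.
	future, err := client.CreateOrUpdate(ctx, group, cluster, app, app+"~myservice", params)
	if err != nil {
		return err
	}
	// Block until the service-side operation completes, then fetch the final resource.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	svc, err := future.Result(client)
	if err != nil {
		return err
	}
	if svc.ID != nil {
		fmt.Println("service resource:", *svc.ID)
	}
	return nil
}
```

Note also that every sender in these hunks now pulls per-request send decorators from the request context via `autorest.GetSendDecorators(req.Context(), ...)` instead of hard-coding `azure.DoRetryWithRegistration` alone.
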
diff --git a/services/storage/mgmt/2019-04-01/storage/blobservices.go b/services/storage/mgmt/2019-04-01/storage/blobservices.go
index 1ae2999d2049..e9eb9cb0d942 100644
--- a/services/storage/mgmt/2019-04-01/storage/blobservices.go
+++ b/services/storage/mgmt/2019-04-01/storage/blobservices.go
@@ -135,6 +135,98 @@ func (client BlobServicesClient) GetServicePropertiesResponder(resp *http.Respon
return
}
+// List lists the blob services of the storage account. It returns a collection of one object named default.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client BlobServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result BlobServiceItems, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/BlobServicesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.BlobServicesClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.BlobServicesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client BlobServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobServicesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client BlobServicesClient) ListResponder(resp *http.Response) (result BlobServiceItems, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// SetServiceProperties sets the properties of a storage account’s Blob service, including properties for Storage
// Analytics and CORS (Cross-Origin Resource Sharing) rules.
// Parameters:
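
`BlobServicesClient.List` is a new operation in the storage 2019-04-01 package. A hedged usage sketch, assuming a pre-authorized client and placeholder resource-group and account names:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage"
)

// listBlobServices calls the new BlobServicesClient.List operation and prints the
// name of each returned service; the collection holds a single "default" entry.
// The client is assumed to already carry an Authorizer.
func listBlobServices(ctx context.Context, client storage.BlobServicesClient, group, account string) error {
	items, err := client.List(ctx, group, account)
	if err != nil {
		return err
	}
	if items.Value != nil {
		for _, svc := range *items.Value {
			if svc.Name != nil {
				fmt.Println("blob service:", *svc.Name)
			}
		}
	}
	return nil
}
```
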
diff --git a/services/storage/mgmt/2019-04-01/storage/fileservices.go b/services/storage/mgmt/2019-04-01/storage/fileservices.go
new file mode 100644
index 000000000000..8de550d6af1b
--- /dev/null
+++ b/services/storage/mgmt/2019-04-01/storage/fileservices.go
@@ -0,0 +1,326 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// FileServicesClient is the Azure Storage Management API.
+type FileServicesClient struct {
+ BaseClient
+}
+
+// NewFileServicesClient creates an instance of the FileServicesClient client.
+func NewFileServicesClient(subscriptionID string) FileServicesClient {
+ return NewFileServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewFileServicesClientWithBaseURI creates an instance of the FileServicesClient client.
+func NewFileServicesClientWithBaseURI(baseURI string, subscriptionID string) FileServicesClient {
+ return FileServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// GetServiceProperties gets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
+// Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client FileServicesClient) GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.GetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileServicesClient", "GetServiceProperties", err.Error())
+ }
+
+ req, err := client.GetServicePropertiesPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "GetServiceProperties", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetServicePropertiesPreparer prepares the GetServiceProperties request.
+func (client FileServicesClient) GetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "FileServicesName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetServicePropertiesSender sends the GetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileServicesClient) GetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetServicePropertiesResponder handles the response to the GetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client FileServicesClient) GetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all file services in storage accounts.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client FileServicesClient) List(ctx context.Context, resourceGroupName string, accountName string) (result FileServiceItems, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileServicesClient", "List", err.Error())
+ }
+
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client FileServicesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileServicesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client FileServicesClient) ListResponder(resp *http.Response) (result FileServiceItems, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// SetServiceProperties sets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
+// Sharing) rules.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// parameters - the properties of file services in storage accounts, including CORS (Cross-Origin Resource
+// Sharing) rules.
+func (client FileServicesClient) SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (result FileServiceProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileServicesClient.SetServiceProperties")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileServicesClient", "SetServiceProperties", err.Error())
+ }
+
+ req, err := client.SetServicePropertiesPreparer(ctx, resourceGroupName, accountName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.SetServicePropertiesSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.SetServicePropertiesResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileServicesClient", "SetServiceProperties", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// SetServicePropertiesPreparer prepares the SetServiceProperties request.
+func (client FileServicesClient) SetServicePropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string, parameters FileServiceProperties) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "FileServicesName": autorest.Encode("path", "default"),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// SetServicePropertiesSender sends the SetServiceProperties request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileServicesClient) SetServicePropertiesSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// SetServicePropertiesResponder handles the response to the SetServiceProperties request. The method always
+// closes the http.Response Body.
+func (client FileServicesClient) SetServicePropertiesResponder(resp *http.Response) (result FileServiceProperties, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
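
The new `fileservices.go` adds a `FileServicesClient` with `GetServiceProperties`, `SetServiceProperties`, and `List` operations against the account's single `default` file service. The sketch below shows setting one CORS rule; it assumes a pre-authorized client, the origin and header values are illustrative only, and the `CorsRules`/`CorsRule` field names are taken from the CORS models this package already uses for the Blob service.

```go
package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage"
)

// setFileServiceCors applies a single CORS rule to the account's "default" file
// service via the new FileServicesClient. The client is assumed to already carry
// an Authorizer; the origin and header values are illustrative only.
func setFileServiceCors(ctx context.Context, client storage.FileServicesClient, group, account string) error {
	maxAge := int32(3600)
	props := storage.FileServiceProperties{
		FileServicePropertiesProperties: &storage.FileServicePropertiesProperties{
			Cors: &storage.CorsRules{
				CorsRules: &[]storage.CorsRule{{
					AllowedOrigins:  &[]string{"https://example.contoso.com"},
					AllowedMethods:  &[]string{"GET", "HEAD"},
					AllowedHeaders:  &[]string{"*"},
					ExposedHeaders:  &[]string{"*"},
					MaxAgeInSeconds: &maxAge,
				}},
			},
		},
	}
	_, err := client.SetServiceProperties(ctx, group, account, props)
	return err
}
```
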
diff --git a/services/storage/mgmt/2019-04-01/storage/fileshares.go b/services/storage/mgmt/2019-04-01/storage/fileshares.go
new file mode 100644
index 000000000000..560e3a1033af
--- /dev/null
+++ b/services/storage/mgmt/2019-04-01/storage/fileshares.go
@@ -0,0 +1,594 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "context"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
+ "net/http"
+)
+
+// FileSharesClient is the Azure Storage Management API.
+type FileSharesClient struct {
+ BaseClient
+}
+
+// NewFileSharesClient creates an instance of the FileSharesClient client.
+func NewFileSharesClient(subscriptionID string) FileSharesClient {
+ return NewFileSharesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewFileSharesClientWithBaseURI creates an instance of the FileSharesClient client.
+func NewFileSharesClientWithBaseURI(baseURI string, subscriptionID string) FileSharesClient {
+ return FileSharesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Create creates a new share under the specified account as described by request body. The share resource includes
+// metadata and properties for that share. It does not include a list of the files contained by the share.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+// fileShare - properties of the file share to create.
+func (client FileSharesClient) Create(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (result FileShare, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Create")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: fileShare,
+ Constraints: []validation.Constraint{{Target: "fileShare.FileShareProperties", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMaximum, Rule: int64(5120), Chain: nil},
+ {Target: "fileShare.FileShareProperties.ShareQuota", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }},
+ }}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Create", err.Error())
+ }
+
+ req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Create", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreatePreparer prepares the Create request.
+func (client FileSharesClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithJSON(fileShare),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) CreateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) CreateResponder(resp *http.Response) (result FileShare, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes specified share under its account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+func (client FileSharesClient) Delete(ctx context.Context, resourceGroupName string, accountName string, shareName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Delete", err.Error())
+ }
+
+ req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, shareName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client FileSharesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets properties of a specified share.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+func (client FileSharesClient) Get(ctx context.Context, resourceGroupName string, accountName string, shareName string) (result FileShare, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Get", err.Error())
+ }
+
+ req, err := client.GetPreparer(ctx, resourceGroupName, accountName, shareName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client FileSharesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) GetSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) GetResponder(resp *http.Response) (result FileShare, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all shares.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// skipToken - optional. Continuation token for the list operation.
+// maxpagesize - optional. Specified maximum number of shares that can be included in the list.
+// filter - optional. When specified, only share names starting with the filter will be listed.
+func (client FileSharesClient) List(ctx context.Context, resourceGroupName string, accountName string, skipToken string, maxpagesize string, filter string) (result FileShareItemsPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List")
+ defer func() {
+ sc := -1
+ if result.fsi.Response.Response != nil {
+ sc = result.fsi.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "List", err.Error())
+ }
+
+ result.fn = client.listNextResults
+ req, err := client.ListPreparer(ctx, resourceGroupName, accountName, skipToken, maxpagesize, filter)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.fsi.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result.fsi, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client FileSharesClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, skipToken string, maxpagesize string, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if len(skipToken) > 0 {
+ queryParameters["$skipToken"] = autorest.Encode("query", skipToken)
+ }
+ if len(maxpagesize) > 0 {
+ queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize)
+ }
+ if len(filter) > 0 {
+ queryParameters["$filter"] = autorest.Encode("query", filter)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) ListSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) ListResponder(resp *http.Response) (result FileShareItems, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client FileSharesClient) listNextResults(ctx context.Context, lastResults FileShareItems) (result FileShareItems, err error) {
+ req, err := lastResults.fileShareItemsPreparer(ctx)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure sending next results request")
+ }
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "listNextResults", resp, "Failure responding to next results request")
+ }
+ return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client FileSharesClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, skipToken string, maxpagesize string, filter string) (result FileShareItemsIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ result.page, err = client.List(ctx, resourceGroupName, accountName, skipToken, maxpagesize, filter)
+ return
+}
+
+// Update updates share properties as specified in request body. Properties not mentioned in the request will not be
+// changed. Update fails if the specified share does not already exist.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// shareName - the name of the file share within the specified storage account. File share names must be
+// between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
+// character must be immediately preceded and followed by a letter or number.
+// fileShare - properties to update for the file share.
+func (client FileSharesClient) Update(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (result FileShare, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileSharesClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: shareName,
+ Constraints: []validation.Constraint{{Target: "shareName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+ {Target: "shareName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: client.SubscriptionID,
+ Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("storage.FileSharesClient", "Update", err.Error())
+ }
+
+ req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, shareName, fileShare)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "storage.FileSharesClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client FileSharesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare FileShare) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "shareName": autorest.Encode("path", shareName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2019-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}", pathParameters),
+ autorest.WithJSON(fileShare),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client FileSharesClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+ return autorest.SendWithSender(client, req, sd...)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client FileSharesClient) UpdateResponder(resp *http.Response) (result FileShare, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
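
The new `fileshares.go` adds a pageable `FileSharesClient`. A hedged sketch of creating a share and then enumerating every share with `ListComplete`, which crosses page boundaries through the generated iterator, follows; it assumes a pre-authorized client, and the group, account, and share names are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage"
	"github.com/Azure/go-autorest/autorest/to"
)

// createAndListShares creates a 100 GiB share and then walks every share in the
// account with ListComplete, which follows page boundaries automatically. The
// client is assumed to already carry an Authorizer; names are placeholders.
func createAndListShares(ctx context.Context, client storage.FileSharesClient, group, account string) error {
	_, err := client.Create(ctx, group, account, "myshare", storage.FileShare{
		FileShareProperties: &storage.FileShareProperties{
			ShareQuota: to.Int32Ptr(100), // quota in GiB; must be between 1 and 5120 per the validation above
		},
	})
	if err != nil {
		return err
	}
	for iter, err := client.ListComplete(ctx, group, account, "", "", ""); iter.NotDone(); err = iter.NextWithContext(ctx) {
		if err != nil {
			return err
		}
		if name := iter.Value().Name; name != nil {
			fmt.Println("share:", *name)
		}
	}
	return nil
}
```
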
diff --git a/services/storage/mgmt/2019-04-01/storage/models.go b/services/storage/mgmt/2019-04-01/storage/models.go
index 83ae6dded4ec..4fed29dbe280 100644
--- a/services/storage/mgmt/2019-04-01/storage/models.go
+++ b/services/storage/mgmt/2019-04-01/storage/models.go
@@ -878,7 +878,7 @@ type AccountProperties struct {
type AccountPropertiesCreateParameters struct {
// CustomDomain - User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property.
CustomDomain *CustomDomain `json:"customDomain,omitempty"`
- // Encryption - Provides the encryption settings on the account. If left unspecified the account encryption settings will remain the same. The default setting is unencrypted.
+ // Encryption - Not applicable. Azure Storage encryption is enabled for all storage accounts and cannot be disabled.
Encryption *Encryption `json:"encryption,omitempty"`
// NetworkRuleSet - Network rule set
NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
@@ -1184,6 +1184,13 @@ func (bc *BlobContainer) UnmarshalJSON(body []byte) error {
return nil
}
+// BlobServiceItems ...
+type BlobServiceItems struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of blob services returned.
+ Value *[]BlobServiceProperties `json:"value,omitempty"`
+}
+
// BlobServiceProperties the properties of a storage account’s Blob service.
type BlobServiceProperties struct {
autorest.Response `json:"-"`
@@ -1419,6 +1426,445 @@ type Endpoints struct {
Dfs *string `json:"dfs,omitempty"`
}
+// ErrorResponse an error response from the Storage service.
+type ErrorResponse struct {
+ Error *ErrorResponseBody `json:"error,omitempty"`
+}
+
+// ErrorResponseBody an error response from the Storage service.
+type ErrorResponseBody struct {
+ // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+ Code *string `json:"code,omitempty"`
+ // Message - A message describing the error, intended to be suitable for display in a user interface.
+ Message *string `json:"message,omitempty"`
+ // Target - The target of the particular error. For example, the name of the property in error.
+ Target *string `json:"target,omitempty"`
+ // Details - A list of additional details about the error.
+ Details *[]ErrorResponseBody `json:"details,omitempty"`
+}
+
+// FileServiceItems ...
+type FileServiceItems struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of file services returned.
+ Value *[]FileServiceProperties `json:"value,omitempty"`
+}
+
+// FileServiceProperties the properties of File services in storage account.
+type FileServiceProperties struct {
+ autorest.Response `json:"-"`
+ // FileServicePropertiesProperties - The properties of File services in storage account.
+ *FileServicePropertiesProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileServiceProperties.
+func (fsp FileServiceProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fsp.FileServicePropertiesProperties != nil {
+ objectMap["properties"] = fsp.FileServicePropertiesProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FileServiceProperties struct.
+func (fsp *FileServiceProperties) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var fileServiceProperties FileServicePropertiesProperties
+ err = json.Unmarshal(*v, &fileServiceProperties)
+ if err != nil {
+ return err
+ }
+ fsp.FileServicePropertiesProperties = &fileServiceProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fsp.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fsp.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fsp.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// FileServicePropertiesProperties the properties of File services in storage account.
+type FileServicePropertiesProperties struct {
+ // Cors - Specifies CORS rules for the File service. You can include up to five CorsRule elements in the request. If no CorsRule elements are included in the request body, all CORS rules will be deleted, and CORS will be disabled for the File service.
+ Cors *CorsRules `json:"cors,omitempty"`
+}
+
+// FileShare properties of the file share, including Id, resource name, resource type, Etag.
+type FileShare struct {
+ autorest.Response `json:"-"`
+ // FileShareProperties - Properties of the file share.
+ *FileShareProperties `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileShare.
+func (fs FileShare) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fs.FileShareProperties != nil {
+ objectMap["properties"] = fs.FileShareProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FileShare struct.
+func (fs *FileShare) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var fileShareProperties FileShareProperties
+ err = json.Unmarshal(*v, &fileShareProperties)
+ if err != nil {
+ return err
+ }
+ fs.FileShareProperties = &fileShareProperties
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ fs.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fs.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fs.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fs.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// FileShareItem the file share properties to be listed out.
+type FileShareItem struct {
+ // FileShareProperties - The file share properties to be listed out.
+ *FileShareProperties `json:"properties,omitempty"`
+ // Etag - READ-ONLY; Resource Etag.
+ Etag *string `json:"etag,omitempty"`
+ // ID - READ-ONLY; Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; The name of the resource
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileShareItem.
+func (fsi FileShareItem) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fsi.FileShareProperties != nil {
+ objectMap["properties"] = fsi.FileShareProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for FileShareItem struct.
+func (fsi *FileShareItem) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var fileShareProperties FileShareProperties
+ err = json.Unmarshal(*v, &fileShareProperties)
+ if err != nil {
+ return err
+ }
+ fsi.FileShareProperties = &fileShareProperties
+ }
+ case "etag":
+ if v != nil {
+ var etag string
+ err = json.Unmarshal(*v, &etag)
+ if err != nil {
+ return err
+ }
+ fsi.Etag = &etag
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ fsi.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ fsi.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ fsi.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// FileShareItems response schema. Contains list of shares returned, and if paging is requested or
+// required, a URL to next page of shares.
+type FileShareItems struct {
+ autorest.Response `json:"-"`
+ // Value - READ-ONLY; List of file shares returned.
+ Value *[]FileShareItem `json:"value,omitempty"`
+ // NextLink - READ-ONLY; Request URL that can be used to query next page of shares. Returned when total number of requested shares exceed maximum page size.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// FileShareItemsIterator provides access to a complete listing of FileShareItem values.
+type FileShareItemsIterator struct {
+ i int
+ page FileShareItemsPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *FileShareItemsIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *FileShareItemsIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter FileShareItemsIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter FileShareItemsIterator) Response() FileShareItems {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter FileShareItemsIterator) Value() FileShareItem {
+ if !iter.page.NotDone() {
+ return FileShareItem{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the FileShareItemsIterator type.
+func NewFileShareItemsIterator(page FileShareItemsPage) FileShareItemsIterator {
+ return FileShareItemsIterator{page: page}
+}
+
+// IsEmpty returns true if the FileShareItems contains no values.
+func (fsi FileShareItems) IsEmpty() bool {
+ return fsi.Value == nil || len(*fsi.Value) == 0
+}
+
+// fileShareItemsPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (fsi FileShareItems) fileShareItemsPreparer(ctx context.Context) (*http.Request, error) {
+ if fsi.NextLink == nil || len(to.String(fsi.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(fsi.NextLink)))
+}
+
+// FileShareItemsPage contains a page of FileShareItem values.
+type FileShareItemsPage struct {
+ fn func(context.Context, FileShareItems) (FileShareItems, error)
+ fsi FileShareItems
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *FileShareItemsPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/FileShareItemsPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.fsi)
+ if err != nil {
+ return err
+ }
+ page.fsi = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *FileShareItemsPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page FileShareItemsPage) NotDone() bool {
+ return !page.fsi.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page FileShareItemsPage) Response() FileShareItems {
+ return page.fsi
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page FileShareItemsPage) Values() []FileShareItem {
+ if page.fsi.IsEmpty() {
+ return nil
+ }
+ return *page.fsi.Value
+}
+
+// Creates a new instance of the FileShareItemsPage type.
+func NewFileShareItemsPage(getNextPage func(context.Context, FileShareItems) (FileShareItems, error)) FileShareItemsPage {
+ return FileShareItemsPage{fn: getNextPage}
+}
+
+// FileShareProperties the properties of the file share.
+type FileShareProperties struct {
+ // LastModifiedTime - READ-ONLY; Returns the date and time the share was last modified.
+ LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"`
+ // Metadata - A name-value pair to associate with the share as metadata.
+ Metadata map[string]*string `json:"metadata"`
+ // ShareQuota - The maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120).
+ ShareQuota *int32 `json:"shareQuota,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for FileShareProperties.
+func (fsp FileShareProperties) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if fsp.Metadata != nil {
+ objectMap["metadata"] = fsp.Metadata
+ }
+ if fsp.ShareQuota != nil {
+ objectMap["shareQuota"] = fsp.ShareQuota
+ }
+ return json.Marshal(objectMap)
+}
+
// GeoReplicationStats statistics related to replication for storage account's Blob, Table, Queue and File
// services. It is only available when geo-redundant replication is enabled for the storage account.
type GeoReplicationStats struct {
diff --git a/services/storage/mgmt/2019-04-01/storage/storageapi/interfaces.go b/services/storage/mgmt/2019-04-01/storage/storageapi/interfaces.go
index 38f976f21072..f4af674cd8bd 100644
--- a/services/storage/mgmt/2019-04-01/storage/storageapi/interfaces.go
+++ b/services/storage/mgmt/2019-04-01/storage/storageapi/interfaces.go
@@ -75,6 +75,7 @@ var _ ManagementPoliciesClientAPI = (*storage.ManagementPoliciesClient)(nil)
// BlobServicesClientAPI contains the set of methods on the BlobServicesClient type.
type BlobServicesClientAPI interface {
GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result storage.BlobServiceProperties, err error)
+ List(ctx context.Context, resourceGroupName string, accountName string) (result storage.BlobServiceItems, err error)
SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters storage.BlobServiceProperties) (result storage.BlobServiceProperties, err error)
}
@@ -98,3 +99,23 @@ type BlobContainersClientAPI interface {
}
var _ BlobContainersClientAPI = (*storage.BlobContainersClient)(nil)
+
+// FileServicesClientAPI contains the set of methods on the FileServicesClient type.
+type FileServicesClientAPI interface {
+ GetServiceProperties(ctx context.Context, resourceGroupName string, accountName string) (result storage.FileServiceProperties, err error)
+ List(ctx context.Context, resourceGroupName string, accountName string) (result storage.FileServiceItems, err error)
+ SetServiceProperties(ctx context.Context, resourceGroupName string, accountName string, parameters storage.FileServiceProperties) (result storage.FileServiceProperties, err error)
+}
+
+var _ FileServicesClientAPI = (*storage.FileServicesClient)(nil)
+
+// FileSharesClientAPI contains the set of methods on the FileSharesClient type.
+type FileSharesClientAPI interface {
+ Create(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare storage.FileShare) (result storage.FileShare, err error)
+ Delete(ctx context.Context, resourceGroupName string, accountName string, shareName string) (result autorest.Response, err error)
+ Get(ctx context.Context, resourceGroupName string, accountName string, shareName string) (result storage.FileShare, err error)
+ List(ctx context.Context, resourceGroupName string, accountName string, skipToken string, maxpagesize string, filter string) (result storage.FileShareItemsPage, err error)
+ Update(ctx context.Context, resourceGroupName string, accountName string, shareName string, fileShare storage.FileShare) (result storage.FileShare, err error)
+}
+
+var _ FileSharesClientAPI = (*storage.FileSharesClient)(nil)
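
The List method in FileSharesClientAPI returns the FileShareItemsPage added in models.go. A hedged sketch (client construction and auth omitted, as in the earlier example) of draining every page through the new iterator type; skipToken, maxpagesize and filter are left empty here.

package storageexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage"
)

// listAllShares walks every page returned by FileSharesClient.List using the
// iterator type added in this change and collects the share names.
func listAllShares(ctx context.Context, client storage.FileSharesClient, resourceGroup, account string) ([]string, error) {
	page, err := client.List(ctx, resourceGroup, account, "", "", "")
	if err != nil {
		return nil, err
	}
	var names []string
	for iter := storage.NewFileShareItemsIterator(page); iter.NotDone(); {
		if item := iter.Value(); item.Name != nil {
			names = append(names, *item.Name)
		}
		// NextWithContext advances within the page and fetches the next page when needed
		if err := iter.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return names, nil
}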
diff --git a/services/web/mgmt/2016-09-01/web/diagnostics.go b/services/web/mgmt/2016-09-01/web/diagnostics.go
index 891768c64c57..7ed6bf327b17 100644
--- a/services/web/mgmt/2016-09-01/web/diagnostics.go
+++ b/services/web/mgmt/2016-09-01/web/diagnostics.go
@@ -68,7 +68,8 @@ func (client DiagnosticsClient) ExecuteSiteAnalysis(ctx context.Context, resourc
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "ExecuteSiteAnalysis", err.Error())
}
@@ -172,7 +173,8 @@ func (client DiagnosticsClient) ExecuteSiteAnalysisSlot(ctx context.Context, res
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "ExecuteSiteAnalysisSlot", err.Error())
}
@@ -276,7 +278,8 @@ func (client DiagnosticsClient) ExecuteSiteDetector(ctx context.Context, resourc
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "ExecuteSiteDetector", err.Error())
}
@@ -380,7 +383,8 @@ func (client DiagnosticsClient) ExecuteSiteDetectorSlot(ctx context.Context, res
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "ExecuteSiteDetectorSlot", err.Error())
}
@@ -483,7 +487,8 @@ func (client DiagnosticsClient) GetHostingEnvironmentDetectorResponse(ctx contex
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "GetHostingEnvironmentDetectorResponse", err.Error())
}
@@ -891,7 +896,8 @@ func (client DiagnosticsClient) GetSiteDetectorResponse(ctx context.Context, res
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "GetSiteDetectorResponse", err.Error())
}
@@ -993,7 +999,8 @@ func (client DiagnosticsClient) GetSiteDetectorResponseSlot(ctx context.Context,
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "GetSiteDetectorResponseSlot", err.Error())
}
diff --git a/services/web/mgmt/2018-02-01/web/diagnostics.go b/services/web/mgmt/2018-02-01/web/diagnostics.go
index 9a0ca4f2f181..b6c4d90884b1 100644
--- a/services/web/mgmt/2018-02-01/web/diagnostics.go
+++ b/services/web/mgmt/2018-02-01/web/diagnostics.go
@@ -68,7 +68,8 @@ func (client DiagnosticsClient) ExecuteSiteAnalysis(ctx context.Context, resourc
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "ExecuteSiteAnalysis", err.Error())
}
@@ -172,7 +173,8 @@ func (client DiagnosticsClient) ExecuteSiteAnalysisSlot(ctx context.Context, res
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "ExecuteSiteAnalysisSlot", err.Error())
}
@@ -276,7 +278,8 @@ func (client DiagnosticsClient) ExecuteSiteDetector(ctx context.Context, resourc
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "ExecuteSiteDetector", err.Error())
}
@@ -380,7 +383,8 @@ func (client DiagnosticsClient) ExecuteSiteDetectorSlot(ctx context.Context, res
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "ExecuteSiteDetectorSlot", err.Error())
}
@@ -483,7 +487,8 @@ func (client DiagnosticsClient) GetHostingEnvironmentDetectorResponse(ctx contex
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "GetHostingEnvironmentDetectorResponse", err.Error())
}
@@ -891,7 +896,8 @@ func (client DiagnosticsClient) GetSiteDetectorResponse(ctx context.Context, res
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "GetSiteDetectorResponse", err.Error())
}
@@ -993,7 +999,8 @@ func (client DiagnosticsClient) GetSiteDetectorResponseSlot(ctx context.Context,
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+[^\.]$`, Chain: nil}}},
{TargetValue: timeGrain,
- Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}); err != nil {
+ Constraints: []validation.Constraint{{Target: "timeGrain", Name: validation.Empty, Rule: false,
+ Chain: []validation.Constraint{{Target: "timeGrain", Name: validation.Pattern, Rule: `PT[1-9][0-9]+[SMH]`, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("web.DiagnosticsClient", "GetSiteDetectorResponseSlot", err.Error())
}
diff --git a/swagger_to_sdk_config.json b/swagger_to_sdk_config.json
index 123705fb47b9..572898ac481a 100644
--- a/swagger_to_sdk_config.json
+++ b/swagger_to_sdk_config.json
@@ -8,7 +8,7 @@
"gofmt -w ./services/"
],
"autorest_options": {
- "use": "@microsoft.azure/autorest.go@~2.1.134",
+ "use": "@microsoft.azure/autorest.go@~2.1.136",
"go": "",
"verbose": "",
"sdkrel:go-sdk-folder": ".",
diff --git a/tools/apidiff/repo/repo.go b/tools/apidiff/repo/repo.go
index bf7c4baa10ac..a0a6ba36a996 100644
--- a/tools/apidiff/repo/repo.go
+++ b/tools/apidiff/repo/repo.go
@@ -78,6 +78,17 @@ func (wt WorkingTree) Branch() (string, error) {
return "", fmt.Errorf("failed to determine active branch: %s", strings.Join(branches, ","))
}
+// DeleteBranch calls "git branch -d branchname" to delete a local branch.
+func (wt WorkingTree) DeleteBranch(branchName string) error {
+ cmd := exec.Command("git", "branch", "-d", branchName)
+ cmd.Dir = wt.dir
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return errors.New(string(output))
+ }
+ return nil
+}
+
// Clone calls "git clone", cloning the working tree into the specified directory.
// The returned WorkingTree points to the clone of the repository.
func (wt WorkingTree) Clone(dest string) (result WorkingTree, err error) {
@@ -168,7 +179,32 @@ func (wt WorkingTree) ListTags(pattern string) ([]string, error) {
if err != nil {
return nil, errors.New(string(output))
}
+ if len(output) == 0 {
+ return []string{}, nil
+ }
tags := strings.Split(strings.TrimSpace(string(output)), "\n")
sort.Strings(tags)
return tags, nil
}
+
+// Pull calls "git pull upstream branch" to update the local working tree.
+func (wt WorkingTree) Pull(upstream, branch string) error {
+ cmd := exec.Command("git", "pull", upstream, branch)
+ cmd.Dir = wt.dir
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return errors.New(string(output))
+ }
+ return nil
+}
+
+// CreateAndCheckout creates and checks out a new branch.
+func (wt WorkingTree) CreateAndCheckout(branch string) error {
+ cmd := exec.Command("git", "checkout", "-b", branch)
+ cmd.Dir = wt.dir
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return errors.New(string(output))
+ }
+ return nil
+}
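
A hedged sketch of how the new WorkingTree helpers added here (Pull, CreateAndCheckout, DeleteBranch) compose with the existing Get and Checkout; the directory, remote, and branch names are placeholders.

package repoexample

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/tools/apidiff/repo"
)

// prepareReleaseBranch refreshes the base branch from the given remote and
// branches off of it; on failure the partially created branch is removed.
func prepareReleaseBranch(sdkDir, remote, base, release string) error {
	wt, err := repo.Get(sdkDir)
	if err != nil {
		return fmt.Errorf("failed to get working tree: %v", err)
	}
	if err := wt.Checkout(base); err != nil {
		return fmt.Errorf("checkout failed: %v", err)
	}
	if err := wt.Pull(remote, base); err != nil {
		return fmt.Errorf("pull failed: %v", err)
	}
	if err := wt.CreateAndCheckout(release); err != nil {
		// best-effort cleanup of the new branch if it was created
		_ = wt.Checkout(base)
		_ = wt.DeleteBranch(release)
		return fmt.Errorf("failed to create branch %s: %v", release, err)
	}
	return nil
}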
diff --git a/tools/internal/modinfo/modinfo.go b/tools/internal/modinfo/modinfo.go
index 1ab4232c0bc8..f97fc35dfff9 100644
--- a/tools/internal/modinfo/modinfo.go
+++ b/tools/internal/modinfo/modinfo.go
@@ -206,3 +206,9 @@ func (m module) NewModule() bool {
func (m module) GenerateReport() report.Package {
return report.Generate(m.lhs, m.rhs, false, false)
}
+
+// IsValidModuleVersion returns true if the provided string is a valid module version (e.g. v1.2.3).
+func IsValidModuleVersion(v string) bool {
+ r := regexp.MustCompile(`^v\d+\.\d+\.\d+$`)
+ return r.MatchString(v)
+}
diff --git a/tools/internal/modinfo/modinfo_test.go b/tools/internal/modinfo/modinfo_test.go
index ffea0aa50085..ee297fb8f78f 100644
--- a/tools/internal/modinfo/modinfo_test.go
+++ b/tools/internal/modinfo/modinfo_test.go
@@ -212,3 +212,15 @@ func TestCreateModuleNameFromPathFail(t *testing.T) {
t.Fatalf("expected empty module name, got %s", n)
}
}
+
+func TestIsValidModuleVersion(t *testing.T) {
+ if !IsValidModuleVersion("v10.21.23") {
+ t.Fatal("unexpected invalid module version")
+ }
+ if IsValidModuleVersion("1.2.3") {
+ t.Fatal("unexpected valid module version, missing v")
+ }
+ if IsValidModuleVersion("v11.563") {
+ t.Fatal("unexpected valid module version, missing patch")
+ }
+}
diff --git a/tools/major-updater/cmd/afterscripts.go b/tools/major-updater/cmd/afterscripts.go
new file mode 100644
index 000000000000..22983be96d5d
--- /dev/null
+++ b/tools/major-updater/cmd/afterscripts.go
@@ -0,0 +1,77 @@
+// Copyright 2018 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "os/exec"
+ "path"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+)
+
+var afterscriptsCmd = &cobra.Command{
+ Use: "afterscripts <SDK dir>",
+ Short: "Run afterscripts for the SDK",
+ Long: `This command will run the afterscripts in the SDK repo,
+ including generating profiles and formatting the generated code`,
+ Args: func(cmd *cobra.Command, args []string) error {
+ return cobra.ExactArgs(1)(cmd, args)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ sdk := args[0]
+ err := theAfterscriptsCommand(sdk)
+ return err
+ },
+}
+
+func init() {
+ rootCmd.AddCommand(afterscriptsCmd)
+}
+
+func theAfterscriptsCommand(sdk string) error {
+ println("Generating profiles...")
+ absolutePathOfSDK, err := filepath.Abs(sdk)
+ if err != nil {
+ return fmt.Errorf("failed to get the directory of SDK: %v", err)
+ }
+ absolutePathOfProfiles := path.Join(absolutePathOfSDK, "profiles")
+ err = changeDir(absolutePathOfProfiles)
+ if err != nil {
+ return fmt.Errorf("failed to enter directory for profiles: %v", err)
+ }
+ c := exec.Command("go", "generate", "./...")
+ err = c.Run()
+ if err != nil {
+ return fmt.Errorf("error occurred when generating profiles: %v", err)
+ }
+ vprintln("Formatting the whole SDK folder...")
+ err = changeDir(absolutePathOfSDK)
+ if err != nil {
+ return fmt.Errorf("failed to enter directory for SDK: %v", err)
+ }
+ c = exec.Command("gofmt", "-w", "./profiles/")
+ err = c.Run()
+ if err != nil {
+ return fmt.Errorf("error occurred when formatting profiles: %v", err)
+ }
+ c = exec.Command("gofmt", "-w", "./services/")
+ err = c.Run()
+ if err != nil {
+ return fmt.Errorf("error occurred when formatting the SDK folder: %v", err)
+ }
+ return nil
+}
diff --git a/tools/major-updater/cmd/autorest.go b/tools/major-updater/cmd/autorest.go
new file mode 100644
index 000000000000..3926765660c9
--- /dev/null
+++ b/tools/major-updater/cmd/autorest.go
@@ -0,0 +1,81 @@
+// Copyright 2018 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+)
+
+var autorestCmd = &cobra.Command{
+ Use: "autorest <SDK dir> <spec dir>",
+ Short: "Execute autorest on specs, saving generated SDK code into SDK dir",
+ Long: `This command will execute autorest on the specs dir,
+ saving the generated SDK code into SDK dir, then runs some after-scripts`,
+ Args: func(cmd *cobra.Command, args []string) error {
+ return cobra.ExactArgs(2)(cmd, args)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ sdk := args[0]
+ spec := args[1]
+ err := theAutorestCommand(sdk, spec)
+ return err
+ },
+}
+
+func init() {
+ rootCmd.AddCommand(autorestCmd)
+}
+
+func theAutorestCommand(sdk, spec string) error {
+ printf("Executing autorest (%d threads)\n", thread)
+ err := os.Setenv("NODE_OPTIONS", "--max-old-space-size=8192")
+ if err != nil {
+ return fmt.Errorf("failed to set environment variable: %v", err)
+ }
+ // get absolute path
+ absolutePathOfSDK, err := filepath.Abs(sdk)
+ if err != nil {
+ return fmt.Errorf("failed to get the directory of SDK: %v", err)
+ }
+ absolutePathOfSpec, err := filepath.Abs(spec)
+ if err != nil {
+ return fmt.Errorf("failed to get the directory of specs: %v", err)
+ }
+ // get every single readme.md file in the directory
+ files, err := selectFilesWithName(absolutePathOfSpec, readme)
+ if err != nil {
+ return fmt.Errorf("failed to find %s files: %v", readme, err)
+ }
+ vprintf("Found %d readme.md files\n", len(files))
+ jobs := make(chan work, 1000)
+ results := make(chan error, 1000)
+ for i := 0; i < thread; i++ {
+ go worker(i, jobs, results)
+ }
+ for _, file := range files {
+ w := work{
+ filename: file,
+ sdkFolder: absolutePathOfSDK,
+ }
+ jobs <- w
+ }
+ close(jobs)
+ for range files {
+ <-results
+ }
+ vprintln("autorest finished")
+ return nil
+}
diff --git a/tools/major-updater/cmd/common.go b/tools/major-updater/cmd/common.go
new file mode 100644
index 000000000000..452d9504dc15
--- /dev/null
+++ b/tools/major-updater/cmd/common.go
@@ -0,0 +1,64 @@
+// Copyright 2018 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+)
+
+func printf(format string, a ...interface{}) {
+ if !quietFlag {
+ fmt.Printf(format, a...)
+ }
+}
+
+func println(a ...interface{}) {
+ if !quietFlag {
+ fmt.Println(a...)
+ }
+}
+
+func dprintf(format string, a ...interface{}) {
+ if debugFlag {
+ printf(format, a...)
+ }
+}
+
+func dprintln(a ...interface{}) {
+ if debugFlag {
+ println(a...)
+ }
+}
+
+func vprintf(format string, a ...interface{}) {
+ if verboseFlag {
+ printf(format, a...)
+ }
+}
+
+func vprintln(a ...interface{}) {
+ if verboseFlag {
+ println(a...)
+ }
+}
+
+func contains(strings []string, str string) bool {
+ for _, s := range strings {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
diff --git a/tools/major-updater/cmd/dep.go b/tools/major-updater/cmd/dep.go
new file mode 100644
index 000000000000..34226aab8a4c
--- /dev/null
+++ b/tools/major-updater/cmd/dep.go
@@ -0,0 +1,51 @@
+// Copyright 2018 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+var depCmd = &cobra.Command{
+ Use: "dep",
+ Short: "Calls dep command to execute dep ensure -update",
+ Long: "This command will invoke the dep ensure -update command",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ err := theDepCommand()
+ return err
+ },
+}
+
+func init() {
+ rootCmd.AddCommand(depCmd)
+}
+
+func theDepCommand() error {
+ println("Executing dep ensure...")
+ depArgs := "ensure -update"
+ if verboseFlag {
+ depArgs += " -v"
+ }
+ c := exec.Command("dep", strings.Split(depArgs, " ")...)
+ err := startCmd(c)
+ if err != nil {
+ return fmt.Errorf("failed to start command: %v", err)
+ }
+ return c.Wait()
+}
diff --git a/tools/major-updater/cmd/root.go b/tools/major-updater/cmd/root.go
new file mode 100644
index 000000000000..bd0f5413652d
--- /dev/null
+++ b/tools/major-updater/cmd/root.go
@@ -0,0 +1,163 @@
+// Copyright 2018 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "regexp"
+
+ "github.com/Azure/azure-sdk-for-go/tools/apidiff/repo"
+ "github.com/spf13/cobra"
+)
+
+const (
+ latest = "latest"
+ master = "master"
+ specUpstream = "origin"
+ branchPattern = "major-version-release-v%d.0.0"
+ readme = "readme.md"
+)
+
+// flags
+var upstream string
+var quietFlag bool
+var debugFlag bool
+var verboseFlag bool
+var thread int
+
+// global variables
+var initialBranch string
+var pattern *regexp.Regexp
+var majorVersion int
+var majorBranchName *string
+
+var rootCmd = &cobra.Command{
+ Use: "major-updater <SDK dir> <spec dir>",
+ Short: "Run the whole procedure of the regular monthly major update",
+ Long: `This tool will execute the whole procedure of releasing a new major update of the azure-sdk-for-go`,
+ Args: func(cmd *cobra.Command, args []string) error {
+ return cobra.ExactArgs(2)(cmd, args)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ absSDK, absSpec, err := absPaths(args[0], args[1])
+ if err != nil {
+ return err
+ }
+ // only start watching for SIGINT once the paths have been validated
+ captureSigInt(absSDK)
+ err = theCommand(absSDK, absSpec)
+ if err != nil {
+ deleteMajorBranch(absSDK)
+ }
+ return err
+ },
+}
+
+func init() {
+ pattern = regexp.MustCompile(`^v([0-9]+)\..*$`)
+ rootCmd.PersistentFlags().StringVar(&upstream, "upstream", "origin", "specify the upstream of the SDK repo")
+ rootCmd.PersistentFlags().IntVarP(&thread, "thread", "t", 4, "thread count when executing autorest")
+ rootCmd.PersistentFlags().BoolVarP(&quietFlag, "quiet", "q", false, "quiet output")
+ rootCmd.PersistentFlags().BoolVarP(&debugFlag, "debug", "d", false, "debug output")
+ rootCmd.PersistentFlags().BoolVarP(&verboseFlag, "verbose", "v", false, "verbose output")
+}
+
+func captureSigInt(absSDK string) {
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+ go func() {
+ for sig := range c {
+ // sig is a ^C, handle it
+ printf("Received %v signal, aborting...\n", sig)
+ deleteMajorBranch(absSDK)
+ os.Exit(1)
+ }
+ }()
+}
+
+// Execute executes the specified command.
+func Execute() {
+ if err := rootCmd.Execute(); err != nil {
+ os.Exit(1)
+ }
+}
+
+func theCommand(absSDK, absSpec string) error {
+ verboseStatus(absSDK, absSpec)
+ var err error
+ if err = theUpdateSDKCommand(absSDK); err != nil {
+ return fmt.Errorf("failed to update SDK repo: %v", err)
+ }
+ if err = theDepCommand(); err != nil {
+ return fmt.Errorf("failed to run dep: %v", err)
+ }
+ if err = theUpdateSpecsCommand(absSpec); err != nil {
+ return fmt.Errorf("failed to update specs repo: %v", err)
+ }
+ if err = theAutorestCommand(absSDK, absSpec); err != nil {
+ return fmt.Errorf("failed to execute autorest: %v", err)
+ }
+ if err = theAfterscriptsCommand(absSDK); err != nil {
+ return fmt.Errorf("failed to execute afterscripts: %v", err)
+ }
+ return nil
+}
+
+func absPaths(sdk, spec string) (string, string, error) {
+ absSDK, err := filepath.Abs(sdk)
+ if err != nil {
+ return "", "", fmt.Errorf("failed to get directory of SDK: %v", err)
+ }
+ absSpec, err := filepath.Abs(spec)
+ if err != nil {
+ return "", "", fmt.Errorf("failed to get directory of specification: %v", err)
+ }
+ return absSDK, absSpec, nil
+}
+
+func verboseStatus(sdk, spec string) {
+ if verboseFlag {
+ vprintf("SDK directory: %s\nSpecification directory: %s\n", sdk, spec)
+ }
+}
+
+func createNewBranch(wt repo.WorkingTree, name string) error {
+ vprintf("creating branch %s\n", name)
+ err := wt.CreateAndCheckout(name)
+ return err
+}
+
+func deleteMajorBranch(sdk string) {
+ if majorBranchName == nil {
+ return
+ }
+ changeDir(sdk)
+ wt, err := repo.Get(sdk)
+ if err != nil {
+ return
+ }
+ wt.Checkout(latest)
+ wt.DeleteBranch(*majorBranchName)
+}
+
+func changeDir(path string) error {
+ err := os.Chdir(path)
+ if err != nil {
+ return fmt.Errorf("failed to change directory to %s: %v", path, err)
+ }
+ return nil
+}
diff --git a/tools/major-updater/cmd/updateSDK.go b/tools/major-updater/cmd/updateSDK.go
new file mode 100644
index 000000000000..8cc042104f9e
--- /dev/null
+++ b/tools/major-updater/cmd/updateSDK.go
@@ -0,0 +1,105 @@
+// Copyright 2018 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/Azure/azure-sdk-for-go/tools/apidiff/repo"
+ "github.com/spf13/cobra"
+)
+
+var updateSDKCmd = &cobra.Command{
+ Use: "updateSDK <SDK dir>",
+ Short: "Update the SDK repo on the latest branch",
+ Long: `This command will check out the latest branch in the SDK repo,
+ find the next major version number based on tags, then create a new branch based on the latest branch`,
+ Args: func(cmd *cobra.Command, args []string) error {
+ return cobra.ExactArgs(1)(cmd, args)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ sdk := args[0]
+ err := theUpdateSDKCommand(sdk)
+ return err
+ },
+}
+
+func init() {
+ rootCmd.AddCommand(updateSDKCmd)
+}
+
+func theUpdateSDKCommand(sdk string) error {
+ println("Updating SDK repo...")
+ absolutePathOfSDK, err := filepath.Abs(sdk)
+ if err != nil {
+ return fmt.Errorf("failed to get the directory of SDK: %v", err)
+ }
+ err = changeDir(absolutePathOfSDK)
+ if err != nil {
+ return fmt.Errorf("failed to change directory to %s: %v", absolutePathOfSDK, err)
+ }
+ cwd, err := os.Getwd()
+ if err != nil {
+ return fmt.Errorf("failed to get the current working directory: %v", err)
+ }
+ wt, err := repo.Get(cwd)
+ if err != nil {
+ return fmt.Errorf("failed to get the working tree: %v", err)
+ }
+ initialBranch, err = wt.Branch()
+ if err != nil {
+ return fmt.Errorf("failed to get the current branch: %v", err)
+ }
+ currentMajorVersion, err := findNextMajorVersionNumber(wt)
+ if err != nil {
+ return fmt.Errorf("failed to determine the current major version: %v", err)
+ }
+ majorVersion = currentMajorVersion + 1
+ printf("Next major version: %d\n", majorVersion)
+ vprintf("Checking out to latest branch in %s\n", cwd)
+ err = wt.Checkout(latest)
+ if err != nil {
+ return fmt.Errorf("checkout failed: %v", err)
+ }
+ err = wt.Pull(upstream, latest)
+ if err != nil {
+ return fmt.Errorf("pull failed: %v", err)
+ }
+ vprintf("Checking out a new branch based on %s\n", latest)
+ branchName := fmt.Sprintf(branchPattern, majorVersion)
+ err = createNewBranch(wt, branchName)
+ if err != nil {
+ return fmt.Errorf("checkout failed: %v", err)
+ }
+ majorBranchName = &branchName
+ return nil
+}
+
+func findNextMajorVersionNumber(wt repo.WorkingTree) (int, error) {
+ tags, err := wt.ListTags("v*")
+ if err != nil {
+ return 0, fmt.Errorf("failed to list tags: %v", err)
+ }
+ number := 0
+ for _, tag := range tags {
+ matches := pattern.FindStringSubmatch(tag)
+ if len(matches) < 2 {
+ // skip tags that do not match the vN.N.N pattern
+ continue
+ }
+ cc, err := strconv.ParseInt(matches[1], 10, 32)
+ c := int(cc)
+ if err == nil && c > number {
+ number = c
+ }
+ }
+ return number, nil
+}
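
A small worked example (with hypothetical tag values) of how the version pattern used by findNextMajorVersionNumber extracts the major number that gets maximized:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	// same pattern as the one compiled in root.go
	pattern := regexp.MustCompile(`^v([0-9]+)\..*$`)
	// with these tags the highest captured major is 32, so the next major release would be v33
	for _, tag := range []string{"v31.4.0", "v32.6.0", "v32.6.0-beta"} {
		if m := pattern.FindStringSubmatch(tag); len(m) > 1 {
			major, err := strconv.Atoi(m[1])
			if err == nil {
				fmt.Printf("%s -> major %d\n", tag, major)
			}
		}
	}
}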
diff --git a/tools/major-updater/cmd/updateSpecs.go b/tools/major-updater/cmd/updateSpecs.go
new file mode 100644
index 000000000000..d0afaa9103f1
--- /dev/null
+++ b/tools/major-updater/cmd/updateSpecs.go
@@ -0,0 +1,74 @@
+// Copyright 2018 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/Azure/azure-sdk-for-go/tools/apidiff/repo"
+ "github.com/spf13/cobra"
+)
+
+var updateSpecsCmd = &cobra.Command{
+ Use: "updateSpec <spec dir>",
+ Short: "Update the specs repo on the master branch",
+ Long: `This command will change the working directory to the specs folder,
+ check out the master branch and update it`,
+ Args: func(cmd *cobra.Command, args []string) error {
+ return cobra.ExactArgs(1)(cmd, args)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ specs := args[0]
+ err := theUpdateSpecsCommand(specs)
+ return err
+ },
+}
+
+func init() {
+ rootCmd.AddCommand(updateSpecsCmd)
+}
+
+func theUpdateSpecsCommand(spec string) error {
+ vprintln("Updating specs repo...")
+ absolutePathOfSpec, err := filepath.Abs(spec)
+ if err != nil {
+ return fmt.Errorf("failed to get the directory of specs: %v", err)
+ }
+ err = changeDir(absolutePathOfSpec)
+ if err != nil {
+ return fmt.Errorf("failed to change directory to %s: %v", absolutePathOfSpec, err)
+ }
+ cwd, err := os.Getwd()
+ if err != nil {
+ return fmt.Errorf("failed to get the current working directory: %v", err)
+ }
+ wt, err := repo.Get(cwd)
+ if err != nil {
+ return fmt.Errorf("failed to get the working tree: %v", err)
+ }
+ vprintf("Checking out to %s branch in %s\n", master, cwd)
+ err = wt.Checkout(master)
+ if err != nil {
+ return fmt.Errorf("checkout failed: %v", err)
+ }
+ vprintf("Pulling %s branch in %s\n", master, cwd)
+ err = wt.Pull(specUpstream, master)
+ if err != nil {
+ return fmt.Errorf("pull failed: %v", err)
+ }
+ return nil
+}
diff --git a/tools/major-updater/cmd/work.go b/tools/major-updater/cmd/work.go
new file mode 100644
index 000000000000..060f56fffb20
--- /dev/null
+++ b/tools/major-updater/cmd/work.go
@@ -0,0 +1,90 @@
+// Copyright 2018 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+const (
+ autorestArgsPattern = "--use=@microsoft.azure/autorest.go@~2.1.99 %s --go --multiapi --go-sdk-folder=%s --use-onever"
+)
+
+type work struct {
+ filename string
+ sdkFolder string
+}
+
+func autorestCommand(file string, sdk string) *exec.Cmd {
+ autorestArgs := fmt.Sprintf(autorestArgsPattern, file, sdk)
+ c := exec.Command("autorest", strings.Split(autorestArgs, " ")...)
+ return c
+}
+
+func worker(id int, jobs <-chan work, results chan<- error) {
+ for work := range jobs {
+ start := time.Now()
+ c := autorestCommand(work.filename, work.sdkFolder)
+ vprintf("worker %d is starting on file %s\nparameters: %v\n", id, work.filename, c.Args)
+ output, err := c.CombinedOutput()
+ if err == nil {
+ vprintf("worker %d is done with file %s (%v)\n", id, work.filename, time.Since(start))
+ } else {
+ printf("worker %d failed on file %s (%v), error messages:\n%v\n", id, work.filename, time.Since(start), string(output))
+ }
+ results <- err
+ }
+}
+
+func startCmd(c *exec.Cmd) error {
+ stdout, err := c.StdoutPipe()
+ if err != nil {
+ return fmt.Errorf("failed to get stdout pipe: %v", err)
+ }
+ scanner := bufio.NewScanner(stdout)
+ go func() {
+ for scanner.Scan() {
+ printf("> %s\n", scanner.Text())
+ }
+ }()
+ stderr, err := c.StderrPipe()
+ if err != nil {
+ return fmt.Errorf("failed to get stderr pipe: %v", err)
+ }
+ scanner = bufio.NewScanner(stderr)
+ go func() {
+ for scanner.Scan() {
+ printf("> %s\n", scanner.Text())
+ }
+ }()
+ return c.Start()
+}
+
+func selectFilesWithName(path string, name string) ([]string, error) {
+ var files []string
+ err := filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
+ if err != nil {
+ // propagate walk errors instead of dereferencing a possibly nil info
+ return err
+ }
+ if !info.IsDir() && info.Name() == name {
+ files = append(files, p)
+ }
+ return nil
+ })
+ return files, err
+}
diff --git a/tools/major-updater/main.go b/tools/major-updater/main.go
new file mode 100644
index 000000000000..21e75cb9e8dd
--- /dev/null
+++ b/tools/major-updater/main.go
@@ -0,0 +1,21 @@
+// Copyright 2018 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import "github.com/Azure/azure-sdk-for-go/tools/major-updater/cmd"
+
+func main() {
+ cmd.Execute()
+}
diff --git a/tools/profileBuilder/cmd/latest.go b/tools/profileBuilder/cmd/latest.go
index 95bd942f0078..45060cdfe0e7 100644
--- a/tools/profileBuilder/cmd/latest.go
+++ b/tools/profileBuilder/cmd/latest.go
@@ -17,6 +17,7 @@
package cmd
import (
+ "fmt"
"io/ioutil"
"log"
"os"
@@ -64,6 +65,7 @@ By default, this command ignores API versions that are in preview.`,
}
outputRootDir = abs
}
+ fmt.Printf("Executing profileBuilder in %s\n", outputRootDir)
outputLog.Printf("Output-Location set to: %s", outputRootDir)
includePreview, err := cmd.Flags().GetBool(previewLongName)
diff --git a/tools/profileBuilder/cmd/list.go b/tools/profileBuilder/cmd/list.go
index a621dc2b9d1d..887d75db9615 100644
--- a/tools/profileBuilder/cmd/list.go
+++ b/tools/profileBuilder/cmd/list.go
@@ -119,6 +119,7 @@ $> ../model/testdata/smallProfile.txt > profileBuilder list --name small_profile
outputRootDir = filepath.Join(outputRootDir, modver)
}
}
+ fmt.Printf("Executing profileBuilder in %s\n", outputRootDir)
outputLog.Printf("Output-Location set to: %s", outputRootDir)
if clearOutputFlag {
if err := dirs.DeleteChildDirs(outputRootDir); err != nil {
diff --git a/tools/versioner/cmd/root.go b/tools/versioner/cmd/root.go
index f9d39e4a12b8..6c254e97c4a7 100644
--- a/tools/versioner/cmd/root.go
+++ b/tools/versioner/cmd/root.go
@@ -32,15 +32,19 @@ import (
)
var rootCmd = &cobra.Command{
- Use: "versioner <staging dir>",
+ Use: "versioner <staging dir> [initial module version]",
Short: "Creates or updates the latest major version for a package from staged content.",
Long: `This tool will compare a staged package against its latest major version to detect
breaking changes. If there are no breaking changes the latest major version is updated
with the staged content. If there are breaking changes the staged content becomes the
 next latest major version and the go.mod file is updated.
+The default version for new modules is v1.0.0 or the value specified for [initial module version].
`,
Args: func(cmd *cobra.Command, args []string) error {
- if err := cobra.ExactArgs(1)(cmd, args); err != nil {
+ if err := cobra.MinimumNArgs(1)(cmd, args); err != nil {
+ return err
+ }
+ if err := cobra.MaximumNArgs(2)(cmd, args); err != nil {
return err
}
return nil
@@ -55,6 +59,8 @@ var (
semverRegex = regexp.MustCompile(`v\d+\.\d+\.\d+$`)
// this is used so tests can hook getTags() to return whatever tags
getTagsHook func(string, string) ([]string, error)
+ // default version to start a module at if not specified
+ startingModVer = "v1.0.0"
)
func init() {
@@ -81,6 +87,12 @@ func theCommand(args []string) error {
// does the actual work
func theCommandImpl(args []string) (string, error) {
stage := filepath.Clean(args[0])
+ if len(args) == 2 {
+ if !modinfo.IsValidModuleVersion(args[1]) {
+ return "", fmt.Errorf("the string '%s' is not a valid module version", args[1])
+ }
+ startingModVer = args[1]
+ }
lmv, err := findLatestMajorVersion(stage)
if err != nil {
return "", fmt.Errorf("failed to find latest major version: %v", err)
@@ -273,13 +285,16 @@ func calculateModuleTag(tags []string, mod modinfo.Provider) (string, error) {
if mod.BreakingChanges() {
return tagPrefix + ".0.0", nil
}
+ if len(tags) == 0 {
+ if mod.VersionSuffix() {
+ panic("module contains a version suffix but no tags were found")
+ }
+ // this is the first module version
+ return tagPrefix + "/" + startingModVer, nil
+ }
if !mod.VersionSuffix() {
tagPrefix = tagPrefix + "/v1"
}
- if len(tags) == 0 {
- // this is v1.0.0
- return tagPrefix + ".0.0", nil
- }
tag := tags[len(tags)-1]
v := semverRegex.FindString(tag)
if v == "" {
diff --git a/version/version.go b/version/version.go
index b7139293081a..62ffa241343b 100644
--- a/version/version.go
+++ b/version/version.go
@@ -18,4 +18,4 @@ package version
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// Number contains the semantic version of this SDK.
-const Number = "v32.6.0"
+const Number = "v33.0.0"